From f23e05711df289700ce9a3d77b2c85dfdb0d2b65 Mon Sep 17 00:00:00 2001 From: <> Date: Mon, 5 Aug 2024 15:01:30 +0000 Subject: [PATCH] Deployed 4d4757d with MkDocs version: 1.6.0 --- .nojekyll | 0 404.html | 4504 +++++++++ .../all-in-one-local-machine/index.html | 4560 +++++++++ Deployment/index.html | 4787 ++++++++++ .../private-offline-deployment/index.html | 4848 ++++++++++ Deployment/trial-on-cloud/index.html | 4806 ++++++++++ Deployment/your-cloud/index.html | 4659 ++++++++++ Modules/API/main/index.html | 4650 ++++++++++ Modules/Agent/index.html | 4692 ++++++++++ Modules/Client/Listener/index.html | 4707 ++++++++++ Modules/Client/Responder/index.html | 4561 +++++++++ Modules/index.html | 4565 +++++++++ Source/index.html | 4759 ++++++++++ Sources/API/api/asgi/index.html | 4640 ++++++++++ Sources/API/api/settings/index.html | 4642 ++++++++++ Sources/API/api/urls/index.html | 4649 ++++++++++ Sources/API/api/wsgi/index.html | 4640 ++++++++++ Sources/API/authenticate/admin/index.html | 4635 ++++++++++ Sources/API/authenticate/apps/index.html | 4635 ++++++++++ .../migrations/0001_init/index.html | 4575 +++++++++ Sources/API/authenticate/models/index.html | 4635 ++++++++++ .../API/authenticate/serializers/index.html | 4635 ++++++++++ Sources/API/authenticate/tests/index.html | 4619 ++++++++++ Sources/API/authenticate/urls/index.html | 4635 ++++++++++ .../utils/fire_and_forget/index.html | 4693 ++++++++++ .../authenticate/utils/get_logger/index.html | 4637 ++++++++++ .../API/authenticate/utils/timer/index.html | 4986 ++++++++++ Sources/API/authenticate/views/index.html | 4777 ++++++++++ Sources/API/hardware/admin/index.html | 5067 ++++++++++ Sources/API/hardware/apps/index.html | 4635 ++++++++++ Sources/API/hardware/forms/index.html | 4635 ++++++++++ .../hardware/migrations/0001_init/index.html | 4575 +++++++++ .../migrations/0002_add_rag/index.html | 4575 +++++++++ Sources/API/hardware/models/index.html | 5744 ++++++++++++ Sources/API/hardware/serializers/index.html | 4635 ++++++++++ Sources/API/hardware/signals/index.html | 4691 ++++++++++ Sources/API/hardware/tests/index.html | 4619 ++++++++++ Sources/API/hardware/urls/index.html | 4635 ++++++++++ Sources/API/hardware/views/index.html | 5570 +++++++++++ Sources/API/llm/admin/index.html | 4635 ++++++++++ Sources/API/llm/apps/index.html | 4635 ++++++++++ Sources/API/llm/llm/config/index.html | 4637 ++++++++++ .../commands/check_models/index.html | 4868 ++++++++++ .../API/llm/migrations/0001_init/index.html | 4575 +++++++++ Sources/API/llm/models/index.html | 4954 ++++++++++ Sources/API/llm/serializers/index.html | 4635 ++++++++++ Sources/API/llm/tests/index.html | 4619 ++++++++++ Sources/API/llm/urls/index.html | 4635 ++++++++++ Sources/API/llm/views/index.html | 4839 ++++++++++ Sources/API/manage/index.html | 4697 ++++++++++ Sources/API/orchestrator/admin/index.html | 4635 ++++++++++ Sources/API/orchestrator/apps/index.html | 4635 ++++++++++ .../orchestrator/chain/clusters/index.html | 4874 ++++++++++ .../completed_emotion_detection/index.html | 4789 ++++++++++ .../chain/completed_hf_llm/index.html | 4744 ++++++++++ .../chain/completed_openai_gpt_35/index.html | 4741 ++++++++++ .../index.html | 4745 ++++++++++ .../index.html | 4741 ++++++++++ .../completed_openai_speech2text/index.html | 4849 ++++++++++ .../completed_openai_text2speech/index.html | 4802 ++++++++++ .../completed_quantization_llm/index.html | 4744 ++++++++++ .../chain/completed_rag/index.html | 4744 ++++++++++ .../chain/completed_speech2text/index.html | 4830 ++++++++++ 
.../chain/completed_task/index.html | 4821 ++++++++++ .../chain/completed_text2speech/index.html | 4800 ++++++++++ .../chain/created_data_text/index.html | 4879 ++++++++++ .../API/orchestrator/chain/manager/index.html | 5539 +++++++++++ .../API/orchestrator/chain/models/index.html | 4637 ++++++++++ .../API/orchestrator/chain/signals/index.html | 4637 ++++++++++ .../API/orchestrator/chain/utils/index.html | 5153 +++++++++++ .../metrics/accuracy_benchmark/index.html | 7816 ++++++++++++++++ .../metrics/latency_benchmark/index.html | 8204 +++++++++++++++++ .../API/orchestrator/metrics/utils/index.html | 4938 ++++++++++ .../migrations/0001_init/index.html | 4575 +++++++++ Sources/API/orchestrator/models/index.html | 5306 +++++++++++ .../API/orchestrator/serializers/index.html | 4635 ++++++++++ Sources/API/orchestrator/tests/index.html | 4619 ++++++++++ Sources/API/orchestrator/urls/index.html | 4635 ++++++++++ Sources/API/orchestrator/views/index.html | 5549 +++++++++++ Sources/Agent/main/index.html | 5980 ++++++++++++ Sources/Agent/models/parameters/index.html | 4635 ++++++++++ Sources/Agent/models/results/index.html | 4635 ++++++++++ Sources/Agent/models/task/index.html | 4746 ++++++++++ Sources/Agent/models/track_type/index.html | 4635 ++++++++++ .../features_extraction/index.html | 5321 +++++++++++ .../emotion_detection/handler/index.html | 5360 +++++++++++ .../emotion_detection/sentiment/index.html | 5093 ++++++++++ .../modules/general_ml/handler/index.html | 5153 +++++++++++ .../modules/general_ml/ml_models/index.html | 4637 ++++++++++ .../Agent/modules/hf_llm/handler/index.html | 4986 ++++++++++ .../Agent/modules/openai/handler/index.html | 6236 +++++++++++++ .../adaptor_worker/index.html | 5231 +++++++++++ .../quantization_llm/handler/index.html | 5197 +++++++++++ .../quantization_llm/models/index.html | 5150 +++++++++++ Sources/Agent/modules/rag/handler/index.html | 4830 ++++++++++ .../modules/rag/neo4j_connector/index.html | 4637 ++++++++++ .../rag/postgresql_connector/index.html | 4637 ++++++++++ .../speech_to_text/speech2text/index.html | 5408 +++++++++++ .../text_to_speech/text2speech/index.html | 5308 +++++++++++ Sources/Agent/setup/index.html | 4633 ++++++++++ Sources/Agent/storage/index.html | 6076 ++++++++++++ Sources/Agent/utils/api/index.html | 5921 ++++++++++++ Sources/Agent/utils/aws/index.html | 4635 ++++++++++ Sources/Agent/utils/constants/index.html | 4691 ++++++++++ Sources/Agent/utils/get_logger/index.html | 4719 ++++++++++ .../utils/storage/api_sync_handler/index.html | 4757 ++++++++++ .../storage/local_sync_handler/index.html | 5024 ++++++++++ .../utils/storage/s3_sync_handler/index.html | 4759 ++++++++++ Sources/Agent/utils/time_logger/index.html | 4955 ++++++++++ Sources/Agent/utils/time_tracker/index.html | 4713 ++++++++++ Sources/Agent/utils/timer/index.html | 4984 ++++++++++ Sources/Client/Listener/api/index.html | 6006 ++++++++++++ .../Client/Listener/audios_acquire/index.html | 5531 +++++++++++ Sources/Client/Listener/constants/index.html | 4709 ++++++++++ .../Listener/mock/data_extraction/index.html | 5659 ++++++++++++ Sources/Client/Listener/setup/index.html | 4575 +++++++++ Sources/Client/Listener/storage/index.html | 5676 ++++++++++++ Sources/Client/Listener/utils/index.html | 5131 +++++++++++ .../Client/Listener/videos_acquire/index.html | 5296 +++++++++++ Sources/Client/Responder/api/index.html | 5093 ++++++++++ Sources/Client/Responder/constants/index.html | 4681 ++++++++++ .../Client/Responder/play_speech/index.html | 5080 ++++++++++ 
Sources/Client/Responder/setup/index.html | 4575 +++++++++ Sources/Client/Responder/utils/index.html | 5212 +++++++++++ Tutorial/annotation_customisation/index.html | 4852 ++++++++++ Tutorial/benchmark_and_annotation/index.html | 4741 ++++++++++ Tutorial/case_study/index.html | 4733 ++++++++++ Tutorial/index.html | 4564 +++++++++ Tutorial/pipeline_customisation/index.html | 5207 +++++++++++ Tutorial/setup/index.html | 4671 ++++++++++ Tutorial/video_demo/index.html | 4683 ++++++++++ assets/_mkdocstrings.css | 119 + assets/images/favicon.png | Bin 0 -> 1870 bytes assets/javascripts/bundle.fe8b6f2b.min.js | 29 + assets/javascripts/bundle.fe8b6f2b.min.js.map | 7 + assets/javascripts/lunr/min/lunr.ar.min.js | 1 + assets/javascripts/lunr/min/lunr.da.min.js | 18 + assets/javascripts/lunr/min/lunr.de.min.js | 18 + assets/javascripts/lunr/min/lunr.du.min.js | 18 + assets/javascripts/lunr/min/lunr.el.min.js | 1 + assets/javascripts/lunr/min/lunr.es.min.js | 18 + assets/javascripts/lunr/min/lunr.fi.min.js | 18 + assets/javascripts/lunr/min/lunr.fr.min.js | 18 + assets/javascripts/lunr/min/lunr.he.min.js | 1 + assets/javascripts/lunr/min/lunr.hi.min.js | 1 + assets/javascripts/lunr/min/lunr.hu.min.js | 18 + assets/javascripts/lunr/min/lunr.hy.min.js | 1 + assets/javascripts/lunr/min/lunr.it.min.js | 18 + assets/javascripts/lunr/min/lunr.ja.min.js | 1 + assets/javascripts/lunr/min/lunr.jp.min.js | 1 + assets/javascripts/lunr/min/lunr.kn.min.js | 1 + assets/javascripts/lunr/min/lunr.ko.min.js | 1 + assets/javascripts/lunr/min/lunr.multi.min.js | 1 + assets/javascripts/lunr/min/lunr.nl.min.js | 18 + assets/javascripts/lunr/min/lunr.no.min.js | 18 + assets/javascripts/lunr/min/lunr.pt.min.js | 18 + assets/javascripts/lunr/min/lunr.ro.min.js | 18 + assets/javascripts/lunr/min/lunr.ru.min.js | 18 + assets/javascripts/lunr/min/lunr.sa.min.js | 1 + .../lunr/min/lunr.stemmer.support.min.js | 1 + assets/javascripts/lunr/min/lunr.sv.min.js | 18 + assets/javascripts/lunr/min/lunr.ta.min.js | 1 + assets/javascripts/lunr/min/lunr.te.min.js | 1 + assets/javascripts/lunr/min/lunr.th.min.js | 1 + assets/javascripts/lunr/min/lunr.tr.min.js | 18 + assets/javascripts/lunr/min/lunr.vi.min.js | 1 + assets/javascripts/lunr/min/lunr.zh.min.js | 1 + assets/javascripts/lunr/tinyseg.js | 206 + assets/javascripts/lunr/wordcut.js | 6708 ++++++++++++++ .../workers/search.b8dbb3d2.min.js | 42 + .../workers/search.b8dbb3d2.min.js.map | 7 + assets/stylesheets/main.3cba04c6.min.css | 1 + assets/stylesheets/main.3cba04c6.min.css.map | 1 + assets/stylesheets/palette.06af60db.min.css | 1 + .../stylesheets/palette.06af60db.min.css.map | 1 + images/AI4WA.png | Bin 0 -> 14394 bytes images/AI4WA.svg | 50 + images/ArchitectureDesign.jpg | Bin 0 -> 1820562 bytes images/Audio.png | Bin 0 -> 165437 bytes images/GPT-4o.jpg | Bin 0 -> 887481 bytes images/OpenOmni.jpg | Bin 0 -> 1042337 bytes images/Tasks.png | Bin 0 -> 175612 bytes images/Triangle.jpg | Bin 0 -> 718181 bytes images/VoiceE2E.jpg | Bin 0 -> 843661 bytes images/accuracy_detail_progress.png | Bin 0 -> 126082 bytes images/accuracy_detail_results.png | Bin 0 -> 72201 bytes images/accuracy_overall.png | Bin 0 -> 38377 bytes images/accuracy_summary_1.png | Bin 0 -> 17997 bytes images/accuracy_summary_2.png | Bin 0 -> 36674 bytes images/accuracy_summary_3.png | Bin 0 -> 38707 bytes images/accuracy_summary_4.png | Bin 0 -> 22378 bytes images/add_token.png | Bin 0 -> 58760 bytes images/ai_running.png | Bin 0 -> 86266 bytes images/annotation_metrics.png | Bin 0 -> 71349 bytes 
images/annotation_overview.png | Bin 0 -> 308207 bytes images/annotation_progress_detail.png | Bin 0 -> 68608 bytes images/assign_tag.png | Bin 0 -> 122614 bytes images/audio_cli.png | Bin 0 -> 60775 bytes images/audio_speech.png | Bin 0 -> 83793 bytes images/benchmark_detail.png | Bin 0 -> 235892 bytes images/benchmark_summary.png | Bin 0 -> 307970 bytes images/client.jpg | Bin 0 -> 307482 bytes images/conversation_data.png | Bin 0 -> 274283 bytes images/detailed_latency.png | Bin 0 -> 259074 bytes images/emotion_annotation.png | Bin 0 -> 316051 bytes images/favicon.ico | Bin 0 -> 15406 bytes images/full_tasks.png | Bin 0 -> 413688 bytes images/gpt-4o-assistance.png | Bin 0 -> 235892 bytes images/gpt-4o.png | Bin 0 -> 307970 bytes images/gpt4oaccuracy.png | Bin 0 -> 72201 bytes images/grab_token.png | Bin 0 -> 81577 bytes images/individual_conversation.png | Bin 0 -> 310271 bytes images/latency_summary_stat.png | Bin 0 -> 201145 bytes images/model_data.png | Bin 0 -> 22110 bytes images/multi-turn-conversation.png | Bin 0 -> 39012 bytes images/multi-turn.png | Bin 0 -> 100725 bytes images/task_record.png | Bin 0 -> 165038 bytes images/video.png | Bin 0 -> 202117 bytes images/video_cli.png | Bin 0 -> 69024 bytes index.html | 5270 +++++++++++ objects.inv | Bin 0 -> 3534 bytes search/search_index.json | 1 + sitemap.xml | 653 ++ sitemap.xml.gz | Bin 0 -> 1208 bytes 224 files changed, 653597 insertions(+) create mode 100644 .nojekyll create mode 100644 404.html create mode 100644 Deployment/all-in-one-local-machine/index.html create mode 100644 Deployment/index.html create mode 100644 Deployment/private-offline-deployment/index.html create mode 100644 Deployment/trial-on-cloud/index.html create mode 100644 Deployment/your-cloud/index.html create mode 100644 Modules/API/main/index.html create mode 100644 Modules/Agent/index.html create mode 100644 Modules/Client/Listener/index.html create mode 100644 Modules/Client/Responder/index.html create mode 100644 Modules/index.html create mode 100644 Source/index.html create mode 100644 Sources/API/api/asgi/index.html create mode 100644 Sources/API/api/settings/index.html create mode 100644 Sources/API/api/urls/index.html create mode 100644 Sources/API/api/wsgi/index.html create mode 100644 Sources/API/authenticate/admin/index.html create mode 100644 Sources/API/authenticate/apps/index.html create mode 100644 Sources/API/authenticate/migrations/0001_init/index.html create mode 100644 Sources/API/authenticate/models/index.html create mode 100644 Sources/API/authenticate/serializers/index.html create mode 100644 Sources/API/authenticate/tests/index.html create mode 100644 Sources/API/authenticate/urls/index.html create mode 100644 Sources/API/authenticate/utils/fire_and_forget/index.html create mode 100644 Sources/API/authenticate/utils/get_logger/index.html create mode 100644 Sources/API/authenticate/utils/timer/index.html create mode 100644 Sources/API/authenticate/views/index.html create mode 100644 Sources/API/hardware/admin/index.html create mode 100644 Sources/API/hardware/apps/index.html create mode 100644 Sources/API/hardware/forms/index.html create mode 100644 Sources/API/hardware/migrations/0001_init/index.html create mode 100644 Sources/API/hardware/migrations/0002_add_rag/index.html create mode 100644 Sources/API/hardware/models/index.html create mode 100644 Sources/API/hardware/serializers/index.html create mode 100644 Sources/API/hardware/signals/index.html create mode 100644 Sources/API/hardware/tests/index.html create mode 100644 
Sources/API/hardware/urls/index.html create mode 100644 Sources/API/hardware/views/index.html create mode 100644 Sources/API/llm/admin/index.html create mode 100644 Sources/API/llm/apps/index.html create mode 100644 Sources/API/llm/llm/config/index.html create mode 100644 Sources/API/llm/management/commands/check_models/index.html create mode 100644 Sources/API/llm/migrations/0001_init/index.html create mode 100644 Sources/API/llm/models/index.html create mode 100644 Sources/API/llm/serializers/index.html create mode 100644 Sources/API/llm/tests/index.html create mode 100644 Sources/API/llm/urls/index.html create mode 100644 Sources/API/llm/views/index.html create mode 100644 Sources/API/manage/index.html create mode 100644 Sources/API/orchestrator/admin/index.html create mode 100644 Sources/API/orchestrator/apps/index.html create mode 100644 Sources/API/orchestrator/chain/clusters/index.html create mode 100644 Sources/API/orchestrator/chain/completed_emotion_detection/index.html create mode 100644 Sources/API/orchestrator/chain/completed_hf_llm/index.html create mode 100644 Sources/API/orchestrator/chain/completed_openai_gpt_35/index.html create mode 100644 Sources/API/orchestrator/chain/completed_openai_gpt_4o_text_and_image/index.html create mode 100644 Sources/API/orchestrator/chain/completed_openai_gpt_4o_text_only/index.html create mode 100644 Sources/API/orchestrator/chain/completed_openai_speech2text/index.html create mode 100644 Sources/API/orchestrator/chain/completed_openai_text2speech/index.html create mode 100644 Sources/API/orchestrator/chain/completed_quantization_llm/index.html create mode 100644 Sources/API/orchestrator/chain/completed_rag/index.html create mode 100644 Sources/API/orchestrator/chain/completed_speech2text/index.html create mode 100644 Sources/API/orchestrator/chain/completed_task/index.html create mode 100644 Sources/API/orchestrator/chain/completed_text2speech/index.html create mode 100644 Sources/API/orchestrator/chain/created_data_text/index.html create mode 100644 Sources/API/orchestrator/chain/manager/index.html create mode 100644 Sources/API/orchestrator/chain/models/index.html create mode 100644 Sources/API/orchestrator/chain/signals/index.html create mode 100644 Sources/API/orchestrator/chain/utils/index.html create mode 100644 Sources/API/orchestrator/metrics/accuracy_benchmark/index.html create mode 100644 Sources/API/orchestrator/metrics/latency_benchmark/index.html create mode 100644 Sources/API/orchestrator/metrics/utils/index.html create mode 100644 Sources/API/orchestrator/migrations/0001_init/index.html create mode 100644 Sources/API/orchestrator/models/index.html create mode 100644 Sources/API/orchestrator/serializers/index.html create mode 100644 Sources/API/orchestrator/tests/index.html create mode 100644 Sources/API/orchestrator/urls/index.html create mode 100644 Sources/API/orchestrator/views/index.html create mode 100644 Sources/Agent/main/index.html create mode 100644 Sources/Agent/models/parameters/index.html create mode 100644 Sources/Agent/models/results/index.html create mode 100644 Sources/Agent/models/task/index.html create mode 100644 Sources/Agent/models/track_type/index.html create mode 100644 Sources/Agent/modules/emotion_detection/features_extraction/index.html create mode 100644 Sources/Agent/modules/emotion_detection/handler/index.html create mode 100644 Sources/Agent/modules/emotion_detection/sentiment/index.html create mode 100644 Sources/Agent/modules/general_ml/handler/index.html create mode 100644 
Sources/Agent/modules/general_ml/ml_models/index.html create mode 100644 Sources/Agent/modules/hf_llm/handler/index.html create mode 100644 Sources/Agent/modules/openai/handler/index.html create mode 100644 Sources/Agent/modules/quantization_llm/adaptor_worker/index.html create mode 100644 Sources/Agent/modules/quantization_llm/handler/index.html create mode 100644 Sources/Agent/modules/quantization_llm/models/index.html create mode 100644 Sources/Agent/modules/rag/handler/index.html create mode 100644 Sources/Agent/modules/rag/neo4j_connector/index.html create mode 100644 Sources/Agent/modules/rag/postgresql_connector/index.html create mode 100644 Sources/Agent/modules/speech_to_text/speech2text/index.html create mode 100644 Sources/Agent/modules/text_to_speech/text2speech/index.html create mode 100644 Sources/Agent/setup/index.html create mode 100644 Sources/Agent/storage/index.html create mode 100644 Sources/Agent/utils/api/index.html create mode 100644 Sources/Agent/utils/aws/index.html create mode 100644 Sources/Agent/utils/constants/index.html create mode 100644 Sources/Agent/utils/get_logger/index.html create mode 100644 Sources/Agent/utils/storage/api_sync_handler/index.html create mode 100644 Sources/Agent/utils/storage/local_sync_handler/index.html create mode 100644 Sources/Agent/utils/storage/s3_sync_handler/index.html create mode 100644 Sources/Agent/utils/time_logger/index.html create mode 100644 Sources/Agent/utils/time_tracker/index.html create mode 100644 Sources/Agent/utils/timer/index.html create mode 100644 Sources/Client/Listener/api/index.html create mode 100644 Sources/Client/Listener/audios_acquire/index.html create mode 100644 Sources/Client/Listener/constants/index.html create mode 100644 Sources/Client/Listener/mock/data_extraction/index.html create mode 100644 Sources/Client/Listener/setup/index.html create mode 100644 Sources/Client/Listener/storage/index.html create mode 100644 Sources/Client/Listener/utils/index.html create mode 100644 Sources/Client/Listener/videos_acquire/index.html create mode 100644 Sources/Client/Responder/api/index.html create mode 100644 Sources/Client/Responder/constants/index.html create mode 100644 Sources/Client/Responder/play_speech/index.html create mode 100644 Sources/Client/Responder/setup/index.html create mode 100644 Sources/Client/Responder/utils/index.html create mode 100644 Tutorial/annotation_customisation/index.html create mode 100644 Tutorial/benchmark_and_annotation/index.html create mode 100644 Tutorial/case_study/index.html create mode 100644 Tutorial/index.html create mode 100644 Tutorial/pipeline_customisation/index.html create mode 100644 Tutorial/setup/index.html create mode 100644 Tutorial/video_demo/index.html create mode 100644 assets/_mkdocstrings.css create mode 100644 assets/images/favicon.png create mode 100644 assets/javascripts/bundle.fe8b6f2b.min.js create mode 100644 assets/javascripts/bundle.fe8b6f2b.min.js.map create mode 100644 assets/javascripts/lunr/min/lunr.ar.min.js create mode 100644 assets/javascripts/lunr/min/lunr.da.min.js create mode 100644 assets/javascripts/lunr/min/lunr.de.min.js create mode 100644 assets/javascripts/lunr/min/lunr.du.min.js create mode 100644 assets/javascripts/lunr/min/lunr.el.min.js create mode 100644 assets/javascripts/lunr/min/lunr.es.min.js create mode 100644 assets/javascripts/lunr/min/lunr.fi.min.js create mode 100644 assets/javascripts/lunr/min/lunr.fr.min.js create mode 100644 assets/javascripts/lunr/min/lunr.he.min.js create mode 100644 
assets/javascripts/lunr/min/lunr.hi.min.js create mode 100644 assets/javascripts/lunr/min/lunr.hu.min.js create mode 100644 assets/javascripts/lunr/min/lunr.hy.min.js create mode 100644 assets/javascripts/lunr/min/lunr.it.min.js create mode 100644 assets/javascripts/lunr/min/lunr.ja.min.js create mode 100644 assets/javascripts/lunr/min/lunr.jp.min.js create mode 100644 assets/javascripts/lunr/min/lunr.kn.min.js create mode 100644 assets/javascripts/lunr/min/lunr.ko.min.js create mode 100644 assets/javascripts/lunr/min/lunr.multi.min.js create mode 100644 assets/javascripts/lunr/min/lunr.nl.min.js create mode 100644 assets/javascripts/lunr/min/lunr.no.min.js create mode 100644 assets/javascripts/lunr/min/lunr.pt.min.js create mode 100644 assets/javascripts/lunr/min/lunr.ro.min.js create mode 100644 assets/javascripts/lunr/min/lunr.ru.min.js create mode 100644 assets/javascripts/lunr/min/lunr.sa.min.js create mode 100644 assets/javascripts/lunr/min/lunr.stemmer.support.min.js create mode 100644 assets/javascripts/lunr/min/lunr.sv.min.js create mode 100644 assets/javascripts/lunr/min/lunr.ta.min.js create mode 100644 assets/javascripts/lunr/min/lunr.te.min.js create mode 100644 assets/javascripts/lunr/min/lunr.th.min.js create mode 100644 assets/javascripts/lunr/min/lunr.tr.min.js create mode 100644 assets/javascripts/lunr/min/lunr.vi.min.js create mode 100644 assets/javascripts/lunr/min/lunr.zh.min.js create mode 100644 assets/javascripts/lunr/tinyseg.js create mode 100644 assets/javascripts/lunr/wordcut.js create mode 100644 assets/javascripts/workers/search.b8dbb3d2.min.js create mode 100644 assets/javascripts/workers/search.b8dbb3d2.min.js.map create mode 100644 assets/stylesheets/main.3cba04c6.min.css create mode 100644 assets/stylesheets/main.3cba04c6.min.css.map create mode 100644 assets/stylesheets/palette.06af60db.min.css create mode 100644 assets/stylesheets/palette.06af60db.min.css.map create mode 100644 images/AI4WA.png create mode 100644 images/AI4WA.svg create mode 100644 images/ArchitectureDesign.jpg create mode 100644 images/Audio.png create mode 100644 images/GPT-4o.jpg create mode 100644 images/OpenOmni.jpg create mode 100644 images/Tasks.png create mode 100644 images/Triangle.jpg create mode 100644 images/VoiceE2E.jpg create mode 100644 images/accuracy_detail_progress.png create mode 100644 images/accuracy_detail_results.png create mode 100644 images/accuracy_overall.png create mode 100644 images/accuracy_summary_1.png create mode 100644 images/accuracy_summary_2.png create mode 100644 images/accuracy_summary_3.png create mode 100644 images/accuracy_summary_4.png create mode 100644 images/add_token.png create mode 100644 images/ai_running.png create mode 100644 images/annotation_metrics.png create mode 100644 images/annotation_overview.png create mode 100644 images/annotation_progress_detail.png create mode 100644 images/assign_tag.png create mode 100644 images/audio_cli.png create mode 100644 images/audio_speech.png create mode 100644 images/benchmark_detail.png create mode 100644 images/benchmark_summary.png create mode 100644 images/client.jpg create mode 100644 images/conversation_data.png create mode 100644 images/detailed_latency.png create mode 100644 images/emotion_annotation.png create mode 100644 images/favicon.ico create mode 100644 images/full_tasks.png create mode 100644 images/gpt-4o-assistance.png create mode 100644 images/gpt-4o.png create mode 100644 images/gpt4oaccuracy.png create mode 100644 images/grab_token.png create mode 100644 
images/individual_conversation.png create mode 100644 images/latency_summary_stat.png create mode 100644 images/model_data.png create mode 100644 images/multi-turn-conversation.png create mode 100644 images/multi-turn.png create mode 100644 images/task_record.png create mode 100644 images/video.png create mode 100644 images/video_cli.png create mode 100644 index.html create mode 100644 objects.inv create mode 100644 search/search_index.json create mode 100644 sitemap.xml create mode 100644 sitemap.xml.gz diff --git a/.nojekyll b/.nojekyll new file mode 100644 index 00000000..e69de29b diff --git a/404.html b/404.html new file mode 100644 index 00000000..146e5121 --- /dev/null +++ b/404.html @@ -0,0 +1,4504 @@ + + + + + + + + + + + + + + + + + + + OpenOmni Framework by AI4WA + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ +

404 - Not found

+ +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/Deployment/all-in-one-local-machine/index.html b/Deployment/all-in-one-local-machine/index.html new file mode 100644 index 00000000..28d75531 --- /dev/null +++ b/Deployment/all-in-one-local-machine/index.html @@ -0,0 +1,4560 @@ + + + + + + + + + + + + + + + + + + + + + + + + + All in One Local Machine - OpenOmni Framework by AI4WA + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

All in One Local Machine

+

This is the setup introduced in the Tutorial/setup section.

+

Shared Access to the Files (videos, images and audio)

+

In this mode, all files are shared on the same machine via the Docker volume and file system, so there is no need to transfer video and audio data between different machines; the API, Agent and Client all have access to the same files.

+

You can check it from here: Tutorial/setup

+ + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/Deployment/index.html b/Deployment/index.html new file mode 100644 index 00000000..2efad79c --- /dev/null +++ b/Deployment/index.html @@ -0,0 +1,4787 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Table of Content - OpenOmni Framework by AI4WA + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Deployment Guide

+

As we suggest in the introduction, we have four modes of deployment:

+ +

If you want to get started easily, you can use our deployed API at https://openomni.ai4wa.com to manage the tasks.

+

If you want to test out the full setup locally, you can follow the guide in the All in One Local Machine section.

+

If you are thinking about deploying it as a product that runs fully locally within a home network, addressing privacy concerns, you can follow the guide in the Private Offline Deployment section.

+

If you are doing research with a cluster of computing resources, or you want annotators to work on the same platform for a serious project, you can follow the guide in the Your Cloud section.

+
+

Modules

+

We have three components in the stack to deploy:

+
    +
  • API
  • +
  • Agent
  • +
  • Client
      +
    • Listener (Audio and Video)
    • +
    • Responder (Audio)
    • +
    +
  • +
+

API

+
    +
  • Required Resource
      +
    • A server (a public IP is required if deployed on the cloud)
    • +
    • Minimum 1 CPU and 1 GB RAM
    • +
    • Deployment method:
        +
      • For cloud server: Docker + Docker Compose + Nginx
      • +
      • For local server: Docker + Docker Compose
      • +
      +
    • +
    +
  • +
+

Agent

+
    +
  • Required Resource
      +
    • Any high-end computational Nvidia GPU resources
        +
      • Can be HPC Clusters
      • +
      • Can work on demand, meaning you can spin up the tasks when needed
      • +
      • Can work on CPU as well, but the performance will be degraded
      • +
      +
    • +
    • Minimum storage of 500 GB
        +
      • This is required to store the models and the data, especially the LLM models
      • +
      +
    • +
    • Python 3.8+
    • +
    +
  • +
+

Client

+
    +
  • Required Resource:
      +
    • Hardware:
        +
      • Microphone: To gather the audio data
      • +
      • Camera: To gather the video data
      • +
      • Speaker: To play the audio data
      • +
      • Minimum 2 GB RAM
      • +
      • Minimum 1 CPU
      • +
      • Minimum 32 GB storage
      • +
      • It can be running on a laptop, or working with a Raspberry Pi
      • +
      +
    • +
    • Python 3.8+
    • +
    +
  • +
+

Something like this

+

client

+

Storage solution

+

All the metadata is communicated via the API, so we need to think about how we can share the video and audio data between the Agent, Client and API.

+

We have four STORAGE_SOLUTION options for these four different scenarios:

+
    +
  • api: audio and video data are uploaded and downloaded via the API endpoints; this is for the Trial on Cloud mode.
  • +
  • volume: all files are shared on the same machine via the Docker volume and file system, so there is no need to sync anything.
  • +
  • local: the modules are deployed on the same local network but on different machines, so we need to sync the data between them with rsync.
  • +
  • s3: the API is on your cloud and the Agent can be anywhere, so we use S3 as the storage place for the data to ensure stable and fast access.
  • +
+

To switch between these four modes, all you need to do is set the STORAGE_SOLUTION environment variable before starting the API:

+
export STORAGE_SOLUTION=api
+
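For illustration, here is a minimal Python sketch of how a module might branch on this variable. The function below is a placeholder; the real sync logic lives in the api/local/s3 storage handlers under Agent/utils/storage/, and the "volume" default is an assumption.

import os

# Read the storage mode chosen before the API/Agent starts; "volume" as a default is an assumption.
STORAGE_SOLUTION = os.environ.get("STORAGE_SOLUTION", "volume")

def describe_sync_strategy(mode: str) -> str:
    # Placeholder dispatch table mirroring the four documented modes.
    strategies = {
        "api": "upload and download files through the API endpoints",
        "volume": "shared Docker volume and file system, nothing to sync",
        "local": "rsync files to the API machine within the same network",
        "s3": "push and pull files through an S3 bucket",
    }
    return strategies.get(mode, "unknown storage solution")

print(f"STORAGE_SOLUTION={STORAGE_SOLUTION}: {describe_sync_strategy(STORAGE_SOLUTION)}")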
+ + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/Deployment/private-offline-deployment/index.html b/Deployment/private-offline-deployment/index.html new file mode 100644 index 00000000..c1e6ab2c --- /dev/null +++ b/Deployment/private-offline-deployment/index.html @@ -0,0 +1,4848 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Private Offline Deployment - OpenOmni Framework by AI4WA + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + +

Private Offline Deployment

+

This mode offloads the Agent, Client and API modules onto different machines within the same network, for use cases such as privacy concerns or a more robust system.

+

An example deployment scenario is:

+
    +
  • Client: Raspberry Pi to gather the audio and video data
  • +
  • API: A server to run the API, which can be a laptop or a desktop
  • +
  • Agent: A PC with Nvidia GPU to run the Agent models
  • +
+

Local Network File Sync

+

To ensure the API/Agent/Client all have access to the files, we will need to sync the files between different machines.

+
    +
  • Client to API: Audio and Video data
  • +
  • API to Agent: Audio and Video data
  • +
  • Agent to API: Speech audio data
  • +
+

As with the other deployment methods, we first deploy the API module.

+

Step 1: Get API running

+

Log in to the machine the API will be deployed on, and clone the repo:

+
git clone git@github.com:AI4WA/OpenOmniFramework.git
+cd ./OpenOmniFramework
+cd ./API
+export STORAGE_SOLUTION=local # this is for local mode
+
+# Run it inside docker, this is the easiest way to get started
+docker compose up
+
+

Get the private IP of this machine.

+

For Mac:

+
ipconfig getifaddr en0
+
+

For Linux:

+
hostname -I
+
+

For Windows:

+
ipconfig
+
+

After this, you should be able to access the API at http://<private-ip>:8000 from any device within the same network.

+

Step 2: Get the token

+

Log in to the API admin, go to http://<private-ip>:8000/authtoken/tokenproxy/ and click Add Token.

+

Add Token

+

Step 3: Sync the files between different machines

+

If the API module runs on Linux or macOS, you can use rsync to sync the files between the machines.

+

In that case, all you need to do is start a new terminal and run the following command:

+
cd ./OpenOmniFramework
+cd ./Client/Listener
+
+source venv/bin/activate
+
+# under this way, STORAGE_SOLUTION in API is local mode
+# sync the audio and video data to the API machine
+python3 storage.py --token your_token_from_step_2 --dest_dir api_machine_user@api_private_ip:/where/api/folder/is/Client/Listener/data --dest_password api_machine_password
+
+
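Conceptually, this local-mode sync is an rsync push over SSH. Below is a rough Python sketch of that idea, not the actual storage.py implementation; it assumes rsync and sshpass are installed, and the paths and credentials are placeholders.

import subprocess

def push_with_rsync(src_dir: str, dest: str, password: str) -> None:
    # Push newly recorded audio/video files to the API machine over SSH.
    # sshpass supplies the SSH password non-interactively; rsync only transfers changed files.
    cmd = [
        "sshpass", "-p", password,
        "rsync", "-avz", "--partial",
        src_dir,
        dest,  # e.g. "api_machine_user@api_private_ip:/where/api/folder/is/Client/Listener/data"
    ]
    subprocess.run(cmd, check=True)

# Example (placeholder values):
# push_with_rsync("./data/", "api_user@192.168.1.10:/srv/OpenOmniFramework/Client/Listener/data", "api_machine_password")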

If you are a Windows user, you can use the api mode storage solution to sync the files between different machines.

+

All you need to do is, in Step 1 before starting the API, run the following command:

+
export API_STORAGE_MODE=api
+
+

And then within the Listener module, you can run the following command:

+
cd ./OpenOmniFramework
+cd ./Client/Listener
+
+source venv/bin/activate
+
+# sync the audio and video data to the API machine
+python3 storage.py --token your_token_from_step_2 --api_domain http://<private-ip>:8000
+
+

This approach is a bit slower than rsync, but the difference should not be noticeable for testing purposes.

+

Step 4: Collect Audio and Video Data

+

Log in to the machine the Client will be deployed on, which should have the camera, microphone and speaker, and clone the repo:

+
# switch to a proper directory
+git clone git@github.com:AI4WA/OpenOmniFramework.git
+
+

For the Listener part, you will need to run the following commands:

+
cd ./OpenOmniFramework
+cd ./Client/Listener
+
+export DISPLAY=:0.0 # THIS IS SPECIFIC FOR RASPBERRY PI
+
+# create the virtual environment if this is your first time run this
+python3 -m venv venv
+source venv/bin/activate
+pip3 install -r requirements.txt
+pip3 install -r requirements.dev.txt # if you are doing further development
+
+# run video acquire
+python3 videos_acquire.py --token your_token_from_step_2 --api_domain http://<private-ip>:8000
+
+

You should be able to see something like this:

+

video_cli

+

Then open a new terminal

+
cd ./OpenOmniFramework
+cd ./Client/Listener
+
+source venv/bin/activate
+
+# run audio acquire
+python3 audios_acquire.py --token your_token_from_step_2 --track_cluster CLUSTER_GPT_4O_ETE_CONVERSATION  --api_domain http://<private-ip>:8000
+
+# you can change the cluster to the one you need
+
+

You will see something like this:

+

audio_cli

+

Step 5: Run Agent models

+

Log in to the machine the Agent will be deployed on, and clone the repo:

+
# switch to a proper directory
+git clone git@github.com:AI4WA/OpenOmniFramework.git
+
+

Before you start the Agent, you also need to set up the file sync between the API and Agent machines.

+

Same as above, if you are a Linux or Mac user, you can use rsync to sync the files between different machines.

+
cd ./OpenOmniFramework
+cd ./Agent
+
+python3 -m venv venv
+
+source venv/bin/activate
+
+pip3 install -r requirements.txt
+pip3 install -r requirements.dev.txt # if you are doing further development
+
+# run storage sync between the API and Agent, in both directions
+
+python3 storage.py --token your_token_from_step_2 --api_domain http://<private-ip>:8000 --dest_dir api_machine_user@api_private_ip:/where/api/folder/is/OpenOmniFramework/Agent/data --dest_password api_machine_password
+
+

And then you are free to run the Agent models.

+
cd ./OpenOmniFramework
+cd ./Agent
+
+source venv/bin/activate
+
+# run the Agent models
+
+python3 main.py --token your_token_from_step_2 --api_domain http://<private-ip>:8000
+
+

Step 6: Play the response

+

The speech audio is fetched via its URL, so this part is simple; the complex logic is handled on the API side.

+
cd ./OpenOmniFramework
+cd ./Client/Responder
+
+# create the virtual environment if this is your first time run this
+python3 -m venv venv
+source venv/bin/activate
+pip3 install -r requirements.txt
+pip3 install -r requirements.dev.txt # if you are doing further development
+
+# run the audio player
+
+python3 play_speech.py --token your_token
+
+ + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/Deployment/trial-on-cloud/index.html b/Deployment/trial-on-cloud/index.html new file mode 100644 index 00000000..0bddc970 --- /dev/null +++ b/Deployment/trial-on-cloud/index.html @@ -0,0 +1,4806 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Trial on Cloud - OpenOmni Framework by AI4WA + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + +

Trial On Cloud

+

In this mode, the STORAGE_SOLUTION is api, which means the audio and video data are uploaded and downloaded via the API endpoints.

+

Step 0: Get a token

+

Our deployed API is at https://openomni.ai4wa.com; you can use it to manage the tasks.

+

Log in with the username admin and the password password; do not change the password, as it is a shared account.

+

Then you are free to create a new account for yourself.

+

And then use your own account to create a Token.

+

add_token

+

So all you need to do here is deploy the Client and Agent parts.

+

Step 1: Clone the repository

+
# switch to a proper directory
+git clone git@github.com:AI4WA/OpenOmniFramework.git
+
+

Step 2: Get Data Sync

+
cd ./OpenOmniFramework
+cd ./Client/Listener
+# create the virtual environment if this is your first time run this
+python3 -m venv venv
+source venv/bin/activate
+
+
+pip3 install -r requirements.txt
+pip3 install -r requirements.dev.txt # if you are doing further development
+
+python3 storage.py --token your_token_from_step_0 --api_domain https://openomni.ai4wa.com
+
+

Step 3: Collect Audio and Video Data

+
cd ./OpenOmniFramework
+cd ./Client/Listener
+
+
+source venv/bin/activate
+
+
+# run video acquire
+python3 videos_acquire.py --token your_token_from_step_0 --api_domain https://openomni.ai4wa.com
+
+

You should be able to see something like this: +video_cli

+

Then open a new terminal

+
cd ./OpenOmniFramework
+cd ./Client/Listener
+
+# create the virtual environment if this is your first time run this
+python3 -m venv venv
+source venv/bin/activate
+pip3 install -r requirements.txt
+pip3 install -r requirements.dev.txt # if you are doing further development
+
+# run audio acquire
+python3 audios_acquire.py --token your_token_from_step_0 --track_cluster CLUSTER_GPT_4O_ETE_CONVERSATION  --api_domain https://openomni.ai4wa.com
+# you can change the cluster to the one you need
+
+

You will see something like this: +audio_cli

+

If everything works, you should be able to check the newly created Data Audios, Data Videos and Speech2Text Tasks in the API Admin page. Something like below: tasks audio video

+

Step 4: Run Agent models

+

Now we need to start the Agent module to consume the tasks.

+

Same as above, we will need to first run the storage sync.

+
cd ./OpenOmniFramework
+cd ./Agent
+
+python3 -m venv venv
+source venv/bin/activate
+pip3 install -r requirements.txt
+pip3 install -r requirements.dev.txt # if you are doing further development
+
+python3 storage.py --token your_token_from_step_0 --api_domain https://openomni.ai4wa.com
+
+

Before we start the Agent module, there is some pre-configuration we need to do.

+

The functionality provided within the Agent module supports OpenAI calls and HuggingFace calls, and there is also our provided emotion detection module.

+

We need to get them set up first.

+

Set up the OpenAI and HuggingFace Environment Variables

+

Create a .env file in the ./Agent folder, and add the following content:

+
HF_TOKEN=Your_HuggingFace_Token
+OPENAI_API_KEY=Your_OpenAI_API_KEY
+
+

Alternatively, you can run:

+
export HF_TOKEN=Your_HuggingFace_Token
+export OPENAI_API_KEY=Your_OpenAI_API_KEY
+
+

For the model part, if you want to get our emotion detection model running, you will need to download the model from the download link.

+

Then put it in the folder ./Agent/data/models/emotion_detection/model_data. It should look like this:

+

emotion_model

+

Then you should be ready to run the Agent module.

+
# run the Agent module
+python3 main.py --token your_token_from_step_0
+
+

You can also skip installing the requirements and run the Agent module directly with Docker:

+
TOKEN=XXX docker compose up
+
+

This will allow you to utilise the GPU resources on your machine if you have one.

+

ai_running

+

At this point, you have the client side feeding video/audio data to the API and the Agent module consuming the data.

+

Step 5: Play the speech audio on the client side

+
cd ./OpenOmniFramework
+cd ./Client/Responder
+
+# create the virtual environment if this is your first time run this
+python3 -m venv venv
+source venv/bin/activate
+pip3 install -r requirements.txt
+pip3 install -r requirements.dev.txt # if you are doing further development
+
+# run the audio player
+
+python3 play_speech.py --token your_token_from_step_0
+
+

You will see something like this:

+

audio_play

+

At this point, you should have the whole pipeline running on your local machine.

+

You should see new tasks created as expected on the Tasks page of the API admin, as shown below:

+

tasks

+

And in the Detailed Latency Benchmark page, you should be able to see the latency of each round of conversation.

+

latency

+ + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/Deployment/your-cloud/index.html b/Deployment/your-cloud/index.html new file mode 100644 index 00000000..e213145f --- /dev/null +++ b/Deployment/your-cloud/index.html @@ -0,0 +1,4659 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Your Cloud - OpenOmni Framework by AI4WA + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Your Cloud

+

This is similar to the Trial on Cloud section; the only difference is that the API end is on your own cloud server.

+

In this mode, your storage solution will be s3, and you will need to:

+
    +
  • create an S3 bucket and set its name as the S3_BUCKET setting in the Agent, API and Client
  • +
  • create an access key and secret key and configure them for the Agent, API and Client; refer to the AWS documentation for more details (a minimal boto3 sketch follows below)
  • +
+
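As an illustration only, creating the bucket with boto3 could look like the sketch below; the bucket name and region are placeholders, and your AWS credentials must already be configured.

import boto3

# Placeholder bucket name and region; reuse the same name in the S3_BUCKET setting
# of the Agent, API and Client.
s3 = boto3.client("s3", region_name="ap-southeast-2")
s3.create_bucket(
    Bucket="my-openomni-data-bucket",
    CreateBucketConfiguration={"LocationConstraint": "ap-southeast-2"},
)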

After this, the first step is to deploy the API to your cloud server.

+

We will assume it is a Linux Machine.

+

Step 1: Deploy the API on Cloud

+

You will need a cloud server; it can be AWS EC2, Azure Compute Engine or any VPS you can access, and it must have a public IP address. A demonstration of how to deploy it to a cloud server is in our CI/CD process.

+

You will need to access the server and install Docker first. Run the commands docker and docker compose to verify the installation.

+

Then you can fork our repo and replace the IP in the .github/workflows/deploy.yml file with the public IP of your server. Also remember to go to Actions -> Secrets and add a secret named SERVER_PASSWORD whose value is your server password.

+

In this way, the API is continuously deployed to your server whenever code changes are merged to the develop branch.

+

If you want to do it manually, it is also simple: just follow the steps in the deploy.yml file. Pull the code to your server and run mainly the commands in the last step:

+
cd /root 
+rm -rf omni
+mkdir omni
+tar xopf omni.tar -C omni
+cd /root/omni/API
+export STORAGE_SOLUTION=s3
+docker compose -f docker-compose.yml down
+docker compose -f docker-compose.yml up --build -d
+
+

The Nginx configuration will look like this:

+
server {
+    server_name openomni.ai4wa.com; # replace with your domain
+    client_max_body_size 100M;
+    location / {
+        proxy_pass http://localhost:8000;
+        proxy_set_header Host $host;
+        proxy_set_header X-Real-IP $remote_addr;
+        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+        proxy_set_header X-Forwarded-Proto $scheme;
+    }
+}
+
+

Then run

+
sudo service nginx restart
+
+

Add a DNS A record for this server under your domain, and you should be able to access the API at http://your.domain.com.

+

Then you can follow the steps in the Trial on Cloud section to get the Agent and Client running.

+ + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/Modules/API/main/index.html b/Modules/API/main/index.html new file mode 100644 index 00000000..cb9fa11c --- /dev/null +++ b/Modules/API/main/index.html @@ -0,0 +1,4650 @@ + + + + + + + + + + + + + + + + + + + + + + + + + API - OpenOmni Framework by AI4WA + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

API

+

The API provides the central logic control and orchestration for the whole system. It is written in Django and Django REST Framework. The database is PostgreSQL.

+

The apps in the API (which continue to be developed) are:

+
    +
  • authenticate:
      +
    • User authentication
    • +
    • JWT token generation
    • +
    • API token generation
    • +
    +
  • +
  • hardware:
      +
    • Hardware management
    • +
    • Store audio and video data
    • +
    • Store the artifacts of the pipeline
    • +
    +
  • +
  • llm:
      +
    • Manage the configuration of the LLM models
    • +
    +
  • +
  • orchestrator:
      +
    • Manage the pipeline
    • +
    • Queue the tasks
    • +
    • Manage the pipeline hooks
    • +
    +
  • +
+

Currently, it will provide the following functionalities:

+
    +
  • admin interface: http://localhost:8000/
  • +
  • API docs: http://localhost:8000/redoc
  • +
+

Adding new functionality is quite easy; you just need to know how to use Django.

+
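For example, a hypothetical Django REST Framework endpoint (not existing project code) is just a view plus a URL entry:

# views.py -- hypothetical example, not part of the existing apps
from rest_framework.decorators import api_view
from rest_framework.response import Response

@api_view(["GET"])
def health_check(request):
    # Trivial endpoint returning a static payload.
    return Response({"status": "ok"})

# urls.py -- wire the view into the URL configuration
# from django.urls import path
# urlpatterns += [path("health/", health_check, name="health-check")]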

Data Storage

+
    +
  • We have a relational database, which is PostgreSQL.
  • +
  • For the audio and video data, we will store them in the file system.
  • +
  • We also include Neo4j for future development of GraphRAG.
  • +
+ + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/Modules/Agent/index.html b/Modules/Agent/index.html new file mode 100644 index 00000000..474bbfdf --- /dev/null +++ b/Modules/Agent/index.html @@ -0,0 +1,4692 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Agent - OpenOmni Framework by AI4WA + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Agent

+

The Agent component is the core of the system, which will be in charge of:

+
    +
  • Running the ML or AI models distributed in the system
      +
    • Running the models which require intensive computation
    • +
    • LLM models
    • +
    • Text2Speech models
    • +
    • Emotion Recognition models
    • +
    • etc.
    • +
    +
  • +
+

It is written in Python, and it is a pretty standard Python project.

+

Each task has its own subfolder within the modules folder.

+

Latency Logger

+

A key thing to notice is that we created two classes to log time points and durations in order to profile the latency of the models (a sketch follows the list below):

+
    +
  • Agent/utils/time_logger.py: log time point
  • +
  • Agent/utils/time_tracker.py: track duration
  • +
+
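A minimal sketch of what such a duration tracker can look like (illustrative only, not the actual time_tracker implementation):

import time
from contextlib import contextmanager

@contextmanager
def track_duration(label: str, store: dict):
    # Record how long the wrapped block takes, keyed by a label such as the model name.
    start = time.perf_counter()
    try:
        yield
    finally:
        store[label] = time.perf_counter() - start

durations = {}
with track_duration("speech2text", durations):
    time.sleep(0.1)  # stand-in for a model call
print(durations)     # e.g. {'speech2text': 0.1003...}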

Docker setup

+

We also set up Docker for the Agent component, via the Dockerfile and docker-compose.yml files.

+

Storage solution

+

How we handle the different storage solutions is implemented in the storage.py file.

+

Data

+

As mentioned in the introduction, models need to be downloaded to the data/models folder; this normally happens automatically.

+

The exception is our emotion detection model; if you want to run it, refer to our introduction page.

+ + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/Modules/Client/Listener/index.html b/Modules/Client/Listener/index.html new file mode 100644 index 00000000..b6cc44d1 --- /dev/null +++ b/Modules/Client/Listener/index.html @@ -0,0 +1,4707 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Listener - OpenOmni Framework by AI4WA + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Listener

+

This module collects audio and video data from any device with a camera and microphone. It can be your laptop, or it can be a Raspberry Pi 4.

+

Collecting video is easy: it just runs in the background, records video when needed and uploads it to the API.

+

Collecting audio, however, is a bit tricky and can be further enhanced.

+

Audio

+

Our solution for audio uses the Whisper model to detect when the user stops talking; you can specify the energy threshold or the timeout in milliseconds to determine when to stop and save this round of sound.

+

This lets the API receive the audio in a "conversation" manner: the speaker stops, the Agent processes and acts, then the speaker speaks again.

+
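A minimal sketch of the end-of-utterance idea (pure illustration: the chunk reader is a stand-in for the microphone stream, the threshold mirrors the --energy_threshold default, and the real listener additionally runs Whisper):

import time
import numpy as np

ENERGY_THRESHOLD = 5000   # mirrors the documented --energy_threshold default
TIMEOUT_MS = 30000        # mirrors the documented --timeout default

def is_silent(chunk: np.ndarray, threshold: float = ENERGY_THRESHOLD) -> bool:
    # Treat a chunk as silence when its mean absolute amplitude is below the threshold.
    return float(np.abs(chunk).mean()) < threshold

def record_round(read_chunk, silence_limit_s: float = 1.0, timeout_ms: int = TIMEOUT_MS) -> np.ndarray:
    # Accumulate chunks until we observe `silence_limit_s` of silence (the speaker stopped)
    # or hit the overall timeout, then return this round of audio for upload/transcription.
    chunks, silent_since, start = [], None, time.time()
    while (time.time() - start) * 1000 < timeout_ms:
        chunk = read_chunk()
        chunks.append(chunk)
        if is_silent(chunk):
            silent_since = silent_since or time.time()
            if time.time() - silent_since >= silence_limit_s:
                break
        else:
            silent_since = None
    return np.concatenate(chunks)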

However, several situations are limited by the current solution:

+
    +
  • multiple speakers: if we add another module to detect the speaker, then the latency will increase again
  • +
  • interruption: if the speaker interrupts the AI, the AI should stop and listen to the speaker again, or interrupt the speaker itself, which GPT-4o is capable of doing
  • +
  • streaming: on the other end, this means the audio data should be streamed to the API, which is not supported by the + current solution
  • +
+

But it can handle basic conversations for research purposes.

+

There are several parameters you can specify when you start the audio listener:

+
    +
  • --api_domain: the API domain, default is http://localhost:8000, which is within the full local setup
  • +
  • --token: the token you get from the API side
  • +
  • --home_id: if you use cloud mode, you can have multiple homes uploading video and audio data, since one of the most common use cases is a home intelligent assistant. A home is not limited to an actual home; it can be a hospital room, etc.
  • +
  • --energy_threshold: the energy threshold to determine when to stop the audio recording, default is 5000
  • +
  • --timeout: the timeout in milliseconds to determine when to stop the audio recording, default is 30000
  • +
  • --default_microphone: which microphone to use if there are multiple microphones, default is pulse
  • +
  • --track_cluster: the cluster you want to track, default is CLUSTER_GPT_4O_ETE_CONVERSATION
  • +
+

Video

+

In theory, video should also be streamed to a model; however, most current models do not have the capability to take streaming input.

+

At the same time, most models take images as their input.

+

So the current design is:

+
    +
  • every 20 seconds (or a duration you specify), we record a video for reference purposes.
  • +
  • every second, we take a frame and save it as an image; this is the main input for the model (see the sketch below).
  • +
+

This is not the best solution, but it is the most practical solution for now.

+
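A minimal sketch of the frame-sampling loop, assuming OpenCV (cv2) and the default webcam; the real videos_acquire.py also records the reference clips and uploads the results:

import os
import time
import cv2

def sample_frames(interval_s: float = 1.0, total_s: float = 20.0, out_dir: str = "./frames") -> int:
    # Grab one frame every `interval_s` seconds for `total_s` seconds and save each as an image.
    os.makedirs(out_dir, exist_ok=True)
    cap = cv2.VideoCapture(0)  # default camera
    start, saved = time.time(), 0
    try:
        while time.time() - start < total_s:
            ok, frame = cap.read()
            if ok:
                cv2.imwrite(os.path.join(out_dir, f"frame_{saved:04d}.jpg"), frame)
                saved += 1
            time.sleep(interval_s)
    finally:
        cap.release()
    return saved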

There are several parameters you can specify when you start the video listener:

+
    +
  • --api_domain: the API domain, default is http://localhost:8000, which is within the full local setup
  • +
  • --token: the token you get from the API side
  • +
  • --home_id: if you use cloud mode, you can have multiple homes uploading video and audio data, since one of the most common use cases is a home intelligent assistant. A home is not limited to an actual home; it can be a hospital room, etc.
  • +
+

That's all; if you want to customise other settings, you can open a PR or change them on your own.

+

Audio/Video/Image File Sync

+

We have described the STORAGE_SOLUTION options in our Deployment Options section.

+

The fastest way is definitely keeping all modules on the same machine, which is not practical in production. So the next options are the local network or the cloud:

+
    +
  • local network: sync data to a central server within the home network
  • +
  • cloud: upload to S3 or other cloud storage, then trigger a serverless function in the cloud to download the file onto an EFS; the Agent and API should both mount the EFS, which will reduce the
  • +
+ + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/Modules/Client/Responder/index.html b/Modules/Client/Responder/index.html new file mode 100644 index 00000000..f27307b5 --- /dev/null +++ b/Modules/Client/Responder/index.html @@ -0,0 +1,4561 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Responder - OpenOmni Framework by AI4WA + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Responder

+

All it does is poll the API end to figure out whether there is any audio that has not been played; if there is, it uses the URL to play it.

+

So the code is very simple and straightforward: it is just a loop that checks the API and plays the audio.

+

The code has a play_speech.py; all the other files are, to some extent, utility functions.

+
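A minimal sketch of that loop (the endpoint path, JSON fields and playback helper below are assumptions for illustration, not the actual API contract used by play_speech.py):

import time
import requests

API_DOMAIN = "http://localhost:8000"  # same default as the other client scripts
TOKEN = "your_token"

def poll_and_play(play_audio_from_url) -> None:
    # Repeatedly ask the API for speech that has not been played yet, then play it.
    headers = {"Authorization": f"Token {TOKEN}"}
    while True:
        # Hypothetical endpoint name; the real one is defined in the Responder's api.py.
        resp = requests.get(f"{API_DOMAIN}/hardware/speech/unplayed/", headers=headers, timeout=10)
        for item in resp.json():
            play_audio_from_url(item["audio_url"])  # assumed field name
        time.sleep(1)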

For the hardware part, it only requires a speaker, so it can run on a laptop or work with a Raspberry Pi.

+ + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/Modules/index.html b/Modules/index.html new file mode 100644 index 00000000..e854ec99 --- /dev/null +++ b/Modules/index.html @@ -0,0 +1,4565 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Table of Content - OpenOmni Framework by AI4WA + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Table of Content

+

We describe how each module is designed, so you can understand how it works and build on it for future development.

+ +

If you want to check the details, you can either check the Source section or our GitHub repo code.

+ + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/Source/index.html b/Source/index.html new file mode 100644 index 00000000..fbefd04a --- /dev/null +++ b/Source/index.html @@ -0,0 +1,4759 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Introduction - OpenOmni Framework by AI4WA + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Code sources

+ + + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/Sources/API/api/asgi/index.html b/Sources/API/api/asgi/index.html new file mode 100644 index 00000000..5b4b76de --- /dev/null +++ b/Sources/API/api/asgi/index.html @@ -0,0 +1,4640 @@ + + + + + + + + + + + + + + + + + + + + + + + + + ASGI - OpenOmni Framework by AI4WA + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

ASGI

+ +
+ + + + +
+ +

ASGI config for api project.

+

It exposes the ASGI callable as a module-level variable named application.

+

For more information on this file, see https://docs.djangoproject.com/en/5.0/howto/deployment/asgi/

+ + + +
+ + + + + + + + + + + +
+ +
+ +
+ + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/Sources/API/api/settings/index.html b/Sources/API/api/settings/index.html new file mode 100644 index 00000000..6dd4a6d3 --- /dev/null +++ b/Sources/API/api/settings/index.html @@ -0,0 +1,4642 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Settings - OpenOmni Framework by AI4WA + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Settings

+ +
+ + + + +
+ +

Django settings for api project.

+

Generated by 'django-admin startproject' using Django 5.0.2.

+

For more information on this file, see https://docs.djangoproject.com/en/5.0/topics/settings/

+

For the full list of settings and their values, see https://docs.djangoproject.com/en/5.0/ref/settings/

+ + + +
+ + + + + + + + + + + +
+ +
+ +
+ + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/Sources/API/api/urls/index.html b/Sources/API/api/urls/index.html new file mode 100644 index 00000000..d36d15fa --- /dev/null +++ b/Sources/API/api/urls/index.html @@ -0,0 +1,4649 @@ + + + + + + + + + + + + + + + + + + + + + + + + + URLs - OpenOmni Framework by AI4WA + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

URLs

+ +
+ + + + +
+ +

URL configuration for api project.

+

The urlpatterns list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/5.0/topics/http/urls/
Examples:
Function views
    1. Add an import: from my_app import views
    2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
    1. Add an import: from other_app.views import Home
    2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
    1. Import the include() function: from django.urls import include, path
    2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))

+ + + +
+ + + + + + + + + + + +
+ +
+ +
+ + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/Sources/API/api/wsgi/index.html b/Sources/API/api/wsgi/index.html new file mode 100644 index 00000000..523ae8cb --- /dev/null +++ b/Sources/API/api/wsgi/index.html @@ -0,0 +1,4640 @@ + + + + + + + + + + + + + + + + + + + + + + + + + WSGI - OpenOmni Framework by AI4WA + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

WSGI

+ +
+ + + + +
+ +

WSGI config for api project.

+

It exposes the WSGI callable as a module-level variable named application.

+

For more information on this file, see https://docs.djangoproject.com/en/5.0/howto/deployment/wsgi/

+ + + +
+ + + + + + + + + + + +
+ +
+ +
+ + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/Sources/API/authenticate/admin/index.html b/Sources/API/authenticate/admin/index.html new file mode 100644 index 00000000..e2912a8a --- /dev/null +++ b/Sources/API/authenticate/admin/index.html @@ -0,0 +1,4635 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Admin - OpenOmni Framework by AI4WA + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Admin

+ +
+ + + + +
+ + + +
+ + + + + + + + + + + +
+ +
+ +
+ + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/Sources/API/authenticate/apps/index.html b/Sources/API/authenticate/apps/index.html new file mode 100644 index 00000000..779f8c93 --- /dev/null +++ b/Sources/API/authenticate/apps/index.html @@ -0,0 +1,4635 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Apps - OpenOmni Framework by AI4WA + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Apps

+ +
+ + + + +
+ + + +
+ + + + + + + + + + + +
+ +
+ +
+ + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/Sources/API/authenticate/migrations/0001_init/index.html b/Sources/API/authenticate/migrations/0001_init/index.html new file mode 100644 index 00000000..63382167 --- /dev/null +++ b/Sources/API/authenticate/migrations/0001_init/index.html @@ -0,0 +1,4575 @@ + + + + + + + + + + + + + + + + + + + + + 0001 init - OpenOmni Framework by AI4WA + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

0001 init

+ +
+ + + + +
+ + + +
+ + + + + + + + + + + +
+ +
+ +
+ + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/Sources/API/authenticate/models/index.html b/Sources/API/authenticate/models/index.html new file mode 100644 index 00000000..de69590e --- /dev/null +++ b/Sources/API/authenticate/models/index.html @@ -0,0 +1,4635 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Models - OpenOmni Framework by AI4WA + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Models

+ +
+ + + + +
+ + + +
+ + + + + + + + + + + +
+ +
+ +
+ + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/Sources/API/authenticate/serializers/index.html b/Sources/API/authenticate/serializers/index.html new file mode 100644 index 00000000..16843435 --- /dev/null +++ b/Sources/API/authenticate/serializers/index.html @@ -0,0 +1,4635 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Serializers - OpenOmni Framework by AI4WA + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Serializers

+ +
+ + + + +
+ + + +
+ + + + + + + + + + + +
+ +
+ +
+ + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/Sources/API/authenticate/tests/index.html b/Sources/API/authenticate/tests/index.html new file mode 100644 index 00000000..b000f565 --- /dev/null +++ b/Sources/API/authenticate/tests/index.html @@ -0,0 +1,4619 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Tests - OpenOmni Framework by AI4WA + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Tests

+ +
+ + + + +
+ +
+ +
+ + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/Sources/API/authenticate/urls/index.html b/Sources/API/authenticate/urls/index.html new file mode 100644 index 00000000..f92793a9 --- /dev/null +++ b/Sources/API/authenticate/urls/index.html @@ -0,0 +1,4635 @@ + + + + + + + + + + + + + + + + + + + + + + + + + URLs - OpenOmni Framework by AI4WA + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

URLs

+ +
+ + + + +
+ + + +
+ + + + + + + + + + + +
+ +
+ +
+ + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/Sources/API/authenticate/utils/fire_and_forget/index.html b/Sources/API/authenticate/utils/fire_and_forget/index.html new file mode 100644 index 00000000..229ce581 --- /dev/null +++ b/Sources/API/authenticate/utils/fire_and_forget/index.html @@ -0,0 +1,4693 @@ + + + + + + + + + + + + + + + + + + + + + + + + + FireAndForget - OpenOmni Framework by AI4WA + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

FireAndForget

+ +
+ + + + +
+ + + +
+ + + + + + + + + +
+ + +

+ fire_and_forget(f) + +

+ + +
+ +

run it and forget it

+ +
+ Source code in API/authenticate/utils/fire_and_forget.py +
def fire_and_forget(f):
+    """run it and forget it"""
+
+    def wrapped(*args, **kwargs):
+        loop = asyncio.new_event_loop()
+        loop.run_in_executor(None, f, *args, *kwargs)
+        loop.close()
+
+    return wrapped
+
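For example, the decorator can be applied to a slow helper so the caller does not wait for it (the helper below is a made-up example, not part of the API code):

import time

# Assumed import path inside the API project; adjust to where fire_and_forget lives.
from authenticate.utils.fire_and_forget import fire_and_forget

@fire_and_forget
def send_notification(message: str):
    # A made-up slow side effect we do not want to block on.
    time.sleep(2)
    print(f"notified: {message}")

send_notification("task finished")  # returns immediately; the work runs in an executor thread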
+
+
+ +
+ + + +
+ +
+ +
+ + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/Sources/API/authenticate/utils/get_logger/index.html b/Sources/API/authenticate/utils/get_logger/index.html new file mode 100644 index 00000000..ac444180 --- /dev/null +++ b/Sources/API/authenticate/utils/get_logger/index.html @@ -0,0 +1,4637 @@ + + + + + + + + + + + + + + + + + + + + + + + + + GetLogger - OpenOmni Framework by AI4WA + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

GetLogger

+ +
+ + + + +
+ + + +
+ + + + + + + + + + + +
+ +
+ +
+ + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/Sources/API/authenticate/utils/timer/index.html b/Sources/API/authenticate/utils/timer/index.html new file mode 100644 index 00000000..3ddc5827 --- /dev/null +++ b/Sources/API/authenticate/utils/timer/index.html @@ -0,0 +1,4986 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Timer - OpenOmni Framework by AI4WA + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Timer

+ +
+ + + + +
+ + + +
+ + + + + + + + +
+ + + +

+ timer + + +

+ + +
+ + +

util function used to log the time taken by a part of program

+ +
+ Source code in API/authenticate/utils/timer.py +
class timer:
+    """
+    util function used to log the time taken by a part of program
+    """
+
+    def __init__(self, logger: Logger, message: str):
+        """
+        init the timer
+
+        Parameters
+        ----------
+        logger: Logger
+            logger to write the logs
+        message: str
+            message to log, like start xxx
+        """
+        self.message = message
+        self.logger = logger
+        self.start = 0
+        self.duration = 0
+        self.sub_timers = []
+
+    def __enter__(self):
+        """
+        context enter to start write this
+        """
+        self.start = time.time()
+        self.logger.info("Starting %s" % self.message)
+        return self
+
+    def __exit__(self, context, value, traceback):
+        """
+        context exit will write this
+        """
+        self.duration = time.time() - self.start
+        self.logger.info(f"Finished {self.message}, that took {self.duration:.3f}")
+
+
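A typical use is as a context manager around a slow block, for example (the logger name and the timed work below are just illustrative):

import logging
import time

# Assumed import path inside the API project; adjust to where the timer class above lives.
from authenticate.utils.timer import timer

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("example")

with timer(logger, "loading the model"):
    time.sleep(1.5)  # stand-in for the real work being timed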
+ + + +
+ + + + + + + + + +
+ + +

+ __enter__() + +

+ + +
+ +

context enter to start write this

+ +
+ Source code in API/authenticate/utils/timer.py +
def __enter__(self):
+    """
+    context enter to start write this
+    """
+    self.start = time.time()
+    self.logger.info("Starting %s" % self.message)
+    return self
+
+
+
+ +
+ +
+ + +

+ __exit__(context, value, traceback) + +

+ + +
+ +

context exit will write this

+ +
+ Source code in API/authenticate/utils/timer.py +
def __exit__(self, context, value, traceback):
+    """
+    context exit will write this
+    """
+    self.duration = time.time() - self.start
+    self.logger.info(f"Finished {self.message}, that took {self.duration:.3f}")
+
+
+
+ +
+ +
+ + +

+ __init__(logger, message) + +

+ + +
+ +

init the timer

+
Parameters
+

logger: Logger
    logger to write the logs
message: str
    message to log, like start xxx

+ +
+ Source code in API/authenticate/utils/timer.py +
def __init__(self, logger: Logger, message: str):
+    """
+    init the timer
+
+    Parameters
+    ----------
+    logger: Logger
+        logger to write the logs
+    message: str
+        message to log, like start xxx
+    """
+    self.message = message
+    self.logger = logger
+    self.start = 0
+    self.duration = 0
+    self.sub_timers = []
+
+
+
+ +
+ + + +
+ +
+ +
+ + + + +
+ +
+ +
+ + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/Sources/API/authenticate/views/index.html b/Sources/API/authenticate/views/index.html new file mode 100644 index 00000000..ebe7bce7 --- /dev/null +++ b/Sources/API/authenticate/views/index.html @@ -0,0 +1,4777 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Views - OpenOmni Framework by AI4WA + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Views

+ +
+ + + + +
+ + + +
+ + + + + + + + +
+ + + +

+ APITokenObtainPairView + + +

+ + +
+

+ Bases: TokenObtainPairView

+ + +
+ Source code in API/authenticate/views.py +
class APITokenObtainPairView(TokenObtainPairView):
+    serializer_class = APITokenObtainPairSerializer
+
+    @swagger_auto_schema(
+        operation_description="Obtain JSON Web Token pair for user",
+        responses={200: APIReturnTokenSerializer},
+    )
+    def post(self, request, *args, **kwargs):
+        """Override the post method to add custom swagger documentation."""
+        return super().post(request, *args, **kwargs)
+
+
+ + + +
+ + + + + + + + + +
+ + +

+ post(request, *args, **kwargs) + +

+ + +
+ +

Override the post method to add custom swagger documentation.

+ +
+ Source code in API/authenticate/views.py +
@swagger_auto_schema(
+    operation_description="Obtain JSON Web Token pair for user",
+    responses={200: APIReturnTokenSerializer},
+)
+def post(self, request, *args, **kwargs):
+    """Override the post method to add custom swagger documentation."""
+    return super().post(request, *args, **kwargs)
+
+
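As a usage sketch, a client can POST its credentials to this view to obtain a token pair (the URL path and the credentials below are assumptions for illustration, not the project's actual route):

import requests

resp = requests.post(
    "http://localhost:8000/authenticate/token/",  # hypothetical route for APITokenObtainPairView
    json={"username": "demo", "password": "demo-password"},
    timeout=10,
)
tokens = resp.json()  # expected to contain the access/refresh token pair
print(tokens.get("access"))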
+
+ +
+ + + +
+ +
+ +
+ + + + +
+ +
+ +
+ + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/Sources/API/hardware/admin/index.html b/Sources/API/hardware/admin/index.html new file mode 100644 index 00000000..4e477d82 --- /dev/null +++ b/Sources/API/hardware/admin/index.html @@ -0,0 +1,5067 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Admin - OpenOmni Framework by AI4WA + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Admin

+ +
+ + + + +
+ + + +
+ + + + + + + + +
+ + + +

+ DataMultiModalConversationFKAdmin + + +

+ + +
+

+ Bases: ImportExportModelAdmin

+ + +

All the obj above will be self.multi_modal_conversation

+ +
+ Source code in API/hardware/admin.py +
class DataMultiModalConversationFKAdmin(ImportExportModelAdmin):
+    """
+    All the obj above will be self.multi_modal_conversation
+    """
+
+    def audio__time_range(self, obj):
+        # format it "%Y-%m-%d %H:%M:%S"
+        if obj.multi_modal_conversation.audio is None:
+            return "No Audio"
+        start_time_str = obj.multi_modal_conversation.audio.start_time.strftime(
+            "%Y-%m-%d %H:%M:%S"
+        )
+        end_time_str = obj.multi_modal_conversation.audio.end_time.strftime(
+            "%Y-%m-%d %H:%M:%S"
+        )
+        return f"{start_time_str} - {end_time_str}"
+
+    audio__time_range.short_description = "Time Range: Audio"
+
+    def video__time_range(self, obj):
+        if len(obj.multi_modal_conversation.video.all()) == 0:
+            return "No Video"
+        videos = obj.multi_modal_conversation.video.all().order_by("start_time")
+        # get the first video start time and the last video end time
+        start_time_str = videos.first().start_time.strftime("%Y-%m-%d %H:%M:%S")
+        end_time_str = videos.last().end_time.strftime("%Y-%m-%d %H:%M:%S")
+        return f"{start_time_str} - {end_time_str}"
+
+    video__time_range.short_description = "Time Range: Video"
+
+    def play_audio(self, obj):
+        if obj.multi_modal_conversation.audio is None:
+            return "No Audio"
+
+        return mark_safe(
+            f'<audio controls name="media">'
+            f'<source src="{obj.multi_modal_conversation.audio.url()}" type="audio/mpeg"></audio>'
+        )
+
+    def play_video(self, obj):
+        if (
+            obj.multi_modal_conversation.video is None
+            or len(obj.multi_modal_conversation.video.all()) == 0
+        ):
+            return "No Video"
+        return mark_safe(
+            f'<video width="320" height="240" controls>'
+            f'<source src="{obj.multi_modal_conversation.video_url()}" type="video/mp4"></video>'
+        )
+
+    def play_res_speech(self, obj):
+        if obj.multi_modal_conversation.res_speech is None:
+            return "No Response Speech"
+        return mark_safe(
+            f'<audio controls name="media">'
+            f'<source src="{obj.multi_modal_conversation.res_speech.url()}" type="audio/mpeg"></audio>'
+        )
+
+    def speech_to_text(self, obj):
+        if obj.multi_modal_conversation.text is None:
+            return "No Text"
+        return obj.multi_modal_conversation.text.text
+
+    def response_text(self, obj):
+        if obj.multi_modal_conversation.res_text is None:
+            return "No Response Text"
+        return obj.multi_modal_conversation.res_text.text
+
+    def annotation_records(self, obj):
+        annotations = obj.annotations
+        if not annotations:
+            return "No Annotations"
+        """
+        Get this presentation into a html like this:
+
+        User: {username}
+        Annotation Overall: {annotation_overall}
+        Annotation Text Modality: {annotation_text_modality}
+        Annotation Audio Modality: {annotation_audio_modality}
+        ----
+        User: {username}
+        ....
+
+        """
+
+        return_html = "<div>"
+        return_html += f"<h5>Total Annotator: {len(annotations.items())} </h5>"
+        return_html += "<hr>"
+        for user_id, annotation in annotations.items():
+            user = User.objects.get(pk=user_id)
+            return_html += f"<h6>User: {user.username}</h6>"
+            return_html += "<ul>"
+            for annotation_key, annotation_value in annotation.items():
+                return_html += f"<li>{annotation_key}: {annotation_value}</li>"
+            return_html += "</ul>"
+            return_html += "<hr>"
+        return_html += "</div>"
+        return mark_safe(return_html)
+
+    list_display = (
+        "id",
+        "audio__time_range",
+        "video__time_range",
+    )
+    exclude = (
+        "audio",
+        "video",
+        "text",
+        "annotations",
+    )
+    search_fields = (
+        "text__text",
+        "res_text__text",
+        "track_id",
+        "text",
+    )
+    readonly_fields = (
+        # "track_id",
+        "play_audio",
+        "audio__time_range",
+        "speech_to_text",
+        "play_video",
+        "video__time_range",
+        "response_text",
+        "play_res_speech",
+        "created_at",
+        "updated_at",
+        "annotation_records",
+    )
+    list_filter = ("created_at", ClusterFilter)
+
+    change_form_template = "admin/hardware/conversation/change_form.html"
+
+    def response_change(self, request, obj):
+        if "_saveandnext" in request.POST:
+            next_obj = self.get_next_obj(obj)
+            if next_obj:
+                return HttpResponseRedirect(
+                    reverse(
+                        "admin:%s_%s_change"
+                        % (
+                            obj._meta.app_label,
+                            obj._meta.model_name,
+                        ),
+                        args=[next_obj.pk],
+                    )
+                )
+        return super().response_change(request, obj)
+
+    def get_next_obj(self, obj):
+        # Define your logic to get the next object
+        # use self model to get the next object
+        obj_model = obj.__class__
+        next_obj = obj_model.objects.filter(pk__gt=obj.pk).order_by("pk").first()
+        return next_obj
+
+    def change_view(self, request, object_id, form_url="", extra_context=None):
+        if extra_context is None:
+            extra_context = {}
+
+        extra_context["additional_save_buttons"] = [
+            {"name": "_saveandnext", "value": "Save and Next"}
+        ]
+
+        return super().change_view(request, object_id, form_url, extra_context)
+
+    def get_form(self, request, *args, **kwargs):
+        form = super().get_form(request, *args, **kwargs)
+        form.current_user = request.user
+        return form
+
+    def save_model(self, request, obj, form, change):
+        annotation_data = {}
+        for key, value in form.cleaned_data.items():
+            if key.startswith("annotation_"):
+                annotation_data[key] = value
+
+        if not obj.annotations:
+            obj.annotations = {}
+        current_annotations = obj.annotations.get(request.user.id, {})
+        obj.annotations[request.user.id] = {
+            **annotation_data,
+            **current_annotations,
+        }
+
+        super().save_model(request, obj, form, change)
+
+
+ + + +
+ + + + + + + + + + + +
+ +
+ +
+ + + + +
+ +
+ +
+ + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/Sources/API/hardware/apps/index.html b/Sources/API/hardware/apps/index.html new file mode 100644 index 00000000..92bac1b3 --- /dev/null +++ b/Sources/API/hardware/apps/index.html @@ -0,0 +1,4635 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Apps - OpenOmni Framework by AI4WA + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Apps

+ +
+ + + + +
+ + + +
+ + + + + + + + + + + +
+ +
+ +
+ + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/Sources/API/hardware/forms/index.html b/Sources/API/hardware/forms/index.html new file mode 100644 index 00000000..277a0bee --- /dev/null +++ b/Sources/API/hardware/forms/index.html @@ -0,0 +1,4635 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Forms - OpenOmni Framework by AI4WA + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Forms

+ +
+ + + + +
+ + + +
+ + + + + + + + + + + +
+ +
+ +
+ + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/Sources/API/hardware/migrations/0001_init/index.html b/Sources/API/hardware/migrations/0001_init/index.html new file mode 100644 index 00000000..5c346f53 --- /dev/null +++ b/Sources/API/hardware/migrations/0001_init/index.html @@ -0,0 +1,4575 @@ + + + + + + + + + + + + + + + + + + + + + 0001 init - OpenOmni Framework by AI4WA + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

0001 init

+ +
+ + + + +
+ + + +
+ + + + + + + + + + + +
+ +
+ +
+ + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/Sources/API/hardware/migrations/0002_add_rag/index.html b/Sources/API/hardware/migrations/0002_add_rag/index.html new file mode 100644 index 00000000..7bf272cd --- /dev/null +++ b/Sources/API/hardware/migrations/0002_add_rag/index.html @@ -0,0 +1,4575 @@ + + + + + + + + + + + + + + + + + + + + + 0002 add rag - OpenOmni Framework by AI4WA + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

0002 add rag

+ +
+ + + + +
+ + + +
+ + + + + + + + + + + +
+ +
+ +
+ + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/Sources/API/hardware/models/index.html b/Sources/API/hardware/models/index.html new file mode 100644 index 00000000..8ea6b28d --- /dev/null +++ b/Sources/API/hardware/models/index.html @@ -0,0 +1,5744 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Models - OpenOmni Framework by AI4WA + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Models

+ +
+ + + + +
+ + + +
+ + + + + + + + +
+ + + +

+ DataAudio + + +

+ + +
+

+ Bases: Model

+ + +

Link to home and hardware device, and the audio data will be stored in the database.
It will be created by the endpoint from client side when audio data is acquired.

+ +
+ Source code in API/hardware/models.py +
class DataAudio(models.Model):
+    """
+    Link to home and hardware device, and the audio data will be stored in the database
+    It will be created by the endpoint from client side when audio data is acquired
+    """
+
+    home = models.ForeignKey(
+        Home, on_delete=models.CASCADE, related_name="audio", null=True, blank=True
+    )
+    hardware_device_mac_address = models.CharField(
+        max_length=100,
+        help_text="The mac address of the hardware device",
+        null=True,
+        blank=True,
+    )
+    uid = models.CharField(
+        max_length=100,
+        help_text="the uid of the audio acquire session, can be treated as scenario id",
+    )
+    sequence_index = models.IntegerField(help_text="The sequence index of the audio")
+    audio_file = models.CharField(max_length=100, help_text="The audio file")
+    start_time = models.DateTimeField(help_text="The start time of the audio")
+    end_time = models.DateTimeField(help_text="The end time of the audio")
+
+    created_at = models.DateTimeField(
+        auto_now_add=True, help_text="The created time of the audio"
+    )
+    updated_at = models.DateTimeField(
+        auto_now=True, help_text="The updated time of the audio"
+    )
+    track_id = models.CharField(
+        max_length=100,
+        help_text="The track id of the multimodal conversation",
+        null=True,
+        blank=True,
+    )
+
+    @classmethod
+    def create_obj(
+        cls,
+        home: Home,
+        uid: str,
+        hardware_device_mac_address: str,
+        sequence_index: int,
+        audio_file: str,
+        start_time: datetime,
+        end_time: datetime,
+        track_id: str = None,
+    ):
+        """
+        Create an audio data object
+        """
+        return cls.objects.create(
+            home=home,
+            hardware_device_mac_address=hardware_device_mac_address,
+            uid=uid,
+            sequence_index=sequence_index,
+            audio_file=audio_file,
+            start_time=start_time,
+            end_time=end_time,
+            track_id=track_id,
+        )
+
+    def __str__(self):
+        return f"{self.uid} - {self.audio_file}"
+
+    def url(self):
+        """
+        get the file, and create media url
+        Returns:
+
+        """
+        return f"/hardware/client_audio/{self.id}"
+
+    class Meta:
+        verbose_name = "Data Audio"
+        verbose_name_plural = "Data Audios"
+
+
+ + + +
+ + + + + + + + + +
+ + +

+ create_obj(home, uid, hardware_device_mac_address, sequence_index, audio_file, start_time, end_time, track_id=None) + + + classmethod + + +

+ + +
+ +

Create an audio data object

+ +
+ Source code in API/hardware/models.py +
@classmethod
+def create_obj(
+    cls,
+    home: Home,
+    uid: str,
+    hardware_device_mac_address: str,
+    sequence_index: int,
+    audio_file: str,
+    start_time: datetime,
+    end_time: datetime,
+    track_id: str = None,
+):
+    """
+    Create an audio data object
+    """
+    return cls.objects.create(
+        home=home,
+        hardware_device_mac_address=hardware_device_mac_address,
+        uid=uid,
+        sequence_index=sequence_index,
+        audio_file=audio_file,
+        start_time=start_time,
+        end_time=end_time,
+        track_id=track_id,
+    )
+
+
+
+ +
+ +
+ + +

+ url() + +

+ + +
+ +

get the file, and create media url
Returns:

+ +
+ Source code in API/hardware/models.py +
def url(self):
+    """
+    get the file, and create media url
+    Returns:
+
+    """
+    return f"/hardware/client_audio/{self.id}"
+
+
+
+ +
+ + + +
+ +
+ +
+ +
+ + + +

+ DataMultiModalConversation + + +

+ + +
+

+ Bases: Model

+ + +

It will be created when an audio is created.
Then video will be added when emotion detection is triggered, or another task requires video.
Text will be added when speech2text is done.
ResText will be added when the text is processed by the language model.
ResSpeech will be added when the text is processed by the text2speech.

+ +
+ Source code in API/hardware/models.py +
class DataMultiModalConversation(models.Model):
+    """
+    It will be created when a audio is created
+    Then video will be added when emotion detection is triggered, or other task require video
+    Text will be added when speech2text is done
+    ResText will be added when the text is processed by the language model
+    ResSpeech will be added when the text is processed by the text2speech
+    """
+
+    audio = models.OneToOneField(
+        DataAudio,
+        on_delete=models.SET_NULL,
+        related_name="multi_modal_conversation",
+        null=True,
+        blank=True,
+    )
+    # video should be an array field
+    video = models.ManyToManyField(
+        DataVideo, related_name="multi_modal_conversation", blank=True
+    )
+    text = models.OneToOneField(
+        DataText,
+        on_delete=models.SET_NULL,
+        related_name="multi_modal_conversation",
+        null=True,
+        blank=True,
+    )
+
+    res_text = models.OneToOneField(
+        ResText,
+        on_delete=models.SET_NULL,
+        related_name="multi_modal_conversation",
+        null=True,
+        blank=True,
+    )
+    res_speech = models.OneToOneField(
+        ResSpeech,
+        on_delete=models.SET_NULL,
+        related_name="multi_modal_conversation",
+        null=True,
+        blank=True,
+    )
+    created_at = models.DateTimeField(
+        auto_now_add=True, help_text="The created time of the multi-modal conversation"
+    )
+    updated_at = models.DateTimeField(
+        auto_now=True, help_text="The updated time of the multi-modal conversation"
+    )
+
+    track_id = models.CharField(
+        max_length=100,
+        help_text="The track id of the multimodal conversation",
+        null=True,
+        blank=True,
+    )
+    annotations = models.JSONField(
+        help_text="The annotations of the emotion detection",
+        null=True,
+        blank=True,
+        default=dict,
+    )
+
+    multi_turns_annotations = models.JSONField(
+        help_text="The annotations of the multi-turns",
+        null=True,
+        blank=True,
+        default=dict,
+    )
+    tags = TaggableManager(blank=True)
+
+    def __str__(self):
+        return f"{self.id}"
+
+    def video_url(self):
+        if len(self.video.all()) == 0:
+            return "No Video"
+        return f"/hardware/client_video/{self.id}"
+
+    class Meta:
+        verbose_name = "Conversation"
+        verbose_name_plural = "Conversations"
+
+
+ + + +
+ + + + + + + + + + + +
+ +
+ +
+ +
+ + + +

+ DataText + + +

+ + +
+

+ Bases: Model

+ + +

The text data will be stored in the database.
It will be created after speech2text is done.

+ +
+ Source code in API/hardware/models.py +
class DataText(models.Model):
+    """
+    The text data will be stored in the database
+    It will be created after speech2text is done
+    """
+
+    # foreign key to the audio
+    audio = models.ForeignKey(
+        DataAudio,
+        on_delete=models.CASCADE,
+        related_name="text",
+        help_text="The audio data",
+    )
+    text = models.TextField(help_text="The text of the audio")
+
+    created_at = models.DateTimeField(
+        auto_now_add=True, help_text="The created time of the text"
+    )
+    updated_at = models.DateTimeField(
+        auto_now=True, help_text="The updated time of the text"
+    )
+    model_name = models.CharField(
+        max_length=100,
+        help_text="The name of the model",
+        null=True,
+        blank=True,
+        default="whisper",
+    )
+
+    def __str__(self):
+        return self.text
+
+    class Meta:
+        verbose_name = "Data Text"
+        verbose_name_plural = "Data Texts"
+
+
+ + + +
+ + + + + + + + + + + +
+ +
+ +
+ +
+ + + +

+ DataVideo + + +

+ + +
+

+ Bases: Model

+ + +

Link to home and hardware device, and the video data will be stored in the database.
It will be created by the endpoint from client side when video data is acquired.
Same as the audio data, the video data will be stored in the database.
It will not be directly connected to the audio data.
Audio data and video data will be connected by the time range softly.

+ +
+ Source code in API/hardware/models.py +
class DataVideo(models.Model):
+    """
+    Link to home and hardware device, and the video data will be stored in the database
+    It will be created by the endpoint from client side when video data is acquired
+    Same as the audio data, the video data will be stored in the database
+    It will not be directly connected to the audio data
+    Audio data and video data will be connected by the time range softly
+    """
+
+    home = models.ForeignKey(
+        Home, on_delete=models.CASCADE, related_name="video", null=True, blank=True
+    )
+    uid = models.CharField(
+        max_length=100,
+        help_text="the uid of the video acquire session, link back to client logs",
+    )
+    hardware_device_mac_address = models.CharField(
+        max_length=100,
+        help_text="The mac address of the hardware device",
+        null=True,
+        blank=True,
+    )
+    # TODO: add start and end time?
+    video_file = models.CharField(max_length=100, help_text="The video file")
+    start_time = models.DateTimeField(help_text="The start time of the video")
+    end_time = models.DateTimeField(help_text="The end time of the video")
+
+    created_at = models.DateTimeField(
+        auto_now_add=True, help_text="The created time of the video"
+    )
+    updated_at = models.DateTimeField(
+        auto_now=True, help_text="The updated time of the video"
+    )
+
+    def __str__(self):
+        return f"{self.uid} - {self.video_file}"
+
+    class Meta:
+        verbose_name = "Data Video"
+        verbose_name_plural = "Data Videos"
+
+
+ + + +
+ + + + + + + + + + + +
+ +
+ +
+ +
+ + + +

+ HardWareDevice + + +

+ + +
+

+ Bases: Model

+ + +

One home can have multiple hardware devices, and the hardware device can be used to acquire the audio and video data

+ +
+ Source code in API/hardware/models.py +
class HardWareDevice(models.Model):
+    """
+    One home can have multiple hardware devices, and the hardware device can be used to acquire the audio and video data
+
+    """
+
+    home = models.ForeignKey(
+        Home,
+        on_delete=models.CASCADE,
+        related_name="hardware_devices",
+        null=True,
+        blank=True,
+    )
+    mac_address = models.CharField(
+        max_length=100, help_text="The mac address of the hardware device", unique=True
+    )
+    device_name = models.CharField(
+        max_length=100,
+        help_text="The name of the hardware device",
+        null=True,
+        blank=True,
+    )
+    device_type = models.CharField(
+        max_length=100,
+        help_text="The type of the hardware device",
+        null=True,
+        blank=True,
+    )
+    description = models.TextField(
+        help_text="The description of the hardware device", null=True, blank=True
+    )
+    created_at = models.DateTimeField(
+        auto_now_add=True, help_text="The created time of the hardware device"
+    )
+    updated_at = models.DateTimeField(
+        auto_now=True, help_text="The updated time of the hardware device"
+    )
+
+    class Meta:
+        verbose_name = "Hardware Device"
+        verbose_name_plural = "Hardware Devices"
+
+
+ + + +
+ + + + + + + + + + + +
+ +
+ +
+ +
+ + + +

+ Home + + +

+ + +
+

+ Bases: Model

+ + +

Created by setup manually, and the client side can specify the home, so all data will be connected to this.

+ +
+ Source code in API/hardware/models.py +
class Home(models.Model):
+    """
+    Created by setup manually, and the client side can specify the home, so all data will be connected to this.
+    """
+
+    user = models.ForeignKey(User, on_delete=models.CASCADE)
+    name = models.CharField(
+        max_length=100, help_text="The name of the home", default="Blue Boat House"
+    )
+    address = models.CharField(
+        max_length=100,
+        help_text="The address of the home",
+        default="1 Kings Park Ave, Crawley WA 6009",
+    )
+
+    def __str__(self):
+        return f"{self.name}"
+
+
+ + + +
+ + + + + + + + + + + +
+ +
+ +
+ + + + +
+ +
+ +
+ + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/Sources/API/hardware/serializers/index.html b/Sources/API/hardware/serializers/index.html new file mode 100644 index 00000000..22d3d4b3 --- /dev/null +++ b/Sources/API/hardware/serializers/index.html @@ -0,0 +1,4635 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Serializers - OpenOmni Framework by AI4WA + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Serializers

+ +
+ + + + +
+ + + +
+ + + + + + + + + + + +
+ +
+ +
+ + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/Sources/API/hardware/signals/index.html b/Sources/API/hardware/signals/index.html new file mode 100644 index 00000000..d931487d --- /dev/null +++ b/Sources/API/hardware/signals/index.html @@ -0,0 +1,4691 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Signals - OpenOmni Framework by AI4WA + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Signals

+ +
+ + + + +
+ + + +
+ + + + + + + + + +
+ + +

+ add_data_multimodal_conversation_entry(sender, instance, created, **kwargs) + +

+ + +
+ +

Add data multimodal conversation

+ +
+ Source code in API/hardware/signals.py +
@receiver(post_save, sender=DataAudio)
+def add_data_multimodal_conversation_entry(sender, instance, created, **kwargs):
+    """
+    Add data multimodal conversation
+    """
+    if created:
+        DataMultiModalConversation.objects.create(
+            audio=instance, track_id=instance.track_id
+        )
+
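In other words, saving a DataAudio row is enough to get a matching conversation. A hedged sketch of that behaviour, assuming it runs inside the API project's Django shell or a test case (import paths and field values are illustrative):

from datetime import datetime, timedelta

from hardware.models import DataAudio, DataMultiModalConversation

audio = DataAudio.objects.create(
    uid="demo-session",
    sequence_index=0,
    audio_file="demo.wav",
    start_time=datetime.now() - timedelta(seconds=10),
    end_time=datetime.now(),
    track_id="T-demo",
)
# The post_save receiver above should have created the linked conversation.
print(DataMultiModalConversation.objects.filter(audio=audio).exists())  # expected: True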
+
+
+ +
+ + + +
+ +
+ +
+ + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/Sources/API/hardware/tests/index.html b/Sources/API/hardware/tests/index.html new file mode 100644 index 00000000..27cc7cb2 --- /dev/null +++ b/Sources/API/hardware/tests/index.html @@ -0,0 +1,4619 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Tests - OpenOmni Framework by AI4WA + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Tests

+ +
+ + + + +
+ +
+ +
+ + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/Sources/API/hardware/urls/index.html b/Sources/API/hardware/urls/index.html new file mode 100644 index 00000000..c0b7061f --- /dev/null +++ b/Sources/API/hardware/urls/index.html @@ -0,0 +1,4635 @@ + + + + + + + + + + + + + + + + + + + + + + + + + URLs - OpenOmni Framework by AI4WA + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

URLs

+ +
+ + + + +
+ + + +
+ + + + + + + + + + + +
+ +
+ +
+ + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/Sources/API/hardware/views/index.html b/Sources/API/hardware/views/index.html new file mode 100644 index 00000000..671003be --- /dev/null +++ b/Sources/API/hardware/views/index.html @@ -0,0 +1,5570 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Views - OpenOmni Framework by AI4WA + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Views

+ +
+ + + + +
+ + + +
+ + + + + + + + +
+ + + +

+ AudioDataViewSet + + +

+ + +
+

+ Bases: ModelViewSet

+ + +
+ Source code in API/hardware/views.py +
class AudioDataViewSet(viewsets.ModelViewSet):
+    queryset = DataAudio.objects.all()
+    serializer_class = AudioDataSerializer
+
+    @swagger_auto_schema(
+        operation_summary="Get an audio data s3 url",
+        operation_description="Get an audio data",
+        responses={200: "The audio data"},
+        tags=["hardware"],
+    )
+    @action(
+        detail=False,
+        methods=["post"],
+        url_path="get_audio_data",
+        url_name="get_audio_data",
+    )
+    def get_audio_data(self, request):
+        """Override the post method to add custom swagger documentation."""
+        audio_id = request.data.get("audio_id", None)
+        if audio_id is None:
+            return Response(
+                {"message": "audio_id is required."},
+                status=status.HTTP_400_BAD_REQUEST,
+            )
+        audio_obj = DataAudio.objects.filter(id=audio_id).first()
+        if audio_obj is None:
+            return Response(
+                {"message": "No audio data found."},
+                status=status.HTTP_404_NOT_FOUND,
+            )
+
+        s3_client = settings.BOTO3_SESSION.client("s3")
+        try:
+            response = s3_client.generate_presigned_url(
+                "get_object",
+                Params={
+                    "Bucket": settings.S3_BUCKET,
+                    "Key": f"Listener/audio/{audio_obj.uid}/audio/{audio_obj.audio_file}",
+                },
+                ExpiresIn=3600,
+            )
+
+            return Response({"audio_url": response}, status=status.HTTP_200_OK)
+        except Exception as e:
+            logger.error(e)
+            return Response(
+                {"message": str(e)}, status=status.HTTP_500_INTERNAL_SERVER_ERROR
+            )
+
+
+ + + +
+ + + + + + + + + +
+ + +

+ get_audio_data(request) + +

+ + +
+ +

Override the post method to add custom swagger documentation.

+ +
+ Source code in API/hardware/views.py +
@swagger_auto_schema(
+    operation_summary="Get an audio data s3 url",
+    operation_description="Get an audio data",
+    responses={200: "The audio data"},
+    tags=["hardware"],
+)
+@action(
+    detail=False,
+    methods=["post"],
+    url_path="get_audio_data",
+    url_name="get_audio_data",
+)
+def get_audio_data(self, request):
+    """Override the post method to add custom swagger documentation."""
+    audio_id = request.data.get("audio_id", None)
+    if audio_id is None:
+        return Response(
+            {"message": "audio_id is required."},
+            status=status.HTTP_400_BAD_REQUEST,
+        )
+    audio_obj = DataAudio.objects.filter(id=audio_id).first()
+    if audio_obj is None:
+        return Response(
+            {"message": "No audio data found."},
+            status=status.HTTP_404_NOT_FOUND,
+        )
+
+    s3_client = settings.BOTO3_SESSION.client("s3")
+    try:
+        response = s3_client.generate_presigned_url(
+            "get_object",
+            Params={
+                "Bucket": settings.S3_BUCKET,
+                "Key": f"Listener/audio/{audio_obj.uid}/audio/{audio_obj.audio_file}",
+            },
+            ExpiresIn=3600,
+        )
+
+        return Response({"audio_url": response}, status=status.HTTP_200_OK)
+    except Exception as e:
+        logger.error(e)
+        return Response(
+            {"message": str(e)}, status=status.HTTP_500_INTERNAL_SERVER_ERROR
+        )
+
+
+
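A hedged example of calling this action from a client to fetch a presigned audio URL (the route prefix and the token are assumptions; only the get_audio_data action name comes from the code above):

import requests

resp = requests.post(
    "http://localhost:8000/hardware/audio/get_audio_data/",  # hypothetical route for this viewset action
    headers={"Authorization": "Bearer <access-token>"},
    json={"audio_id": 1},
    timeout=10,
)
print(resp.json())  # {"audio_url": "<presigned S3 url>"} on success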
+ +
+ + + +
+ +
+ +
+ +
+ + + +

+ Text2SpeechViewSet + + +

+ + +
+

+ Bases: ModelViewSet

+ + +
+ Source code in API/hardware/views.py +
class Text2SpeechViewSet(viewsets.ModelViewSet):
+    queryset = ResSpeech.objects.all()
+    serializer_class = ResSpeechSerializer
+
+    # retrieve it based on the mac address
+    def get_queryset(self):
+        queryset = ResSpeech.objects.filter(
+            played=False, text2speech_file__isnull=False
+        )
+        home_id = self.request.query_params.get("home_id", None)
+        logger.info(f"Home id: {home_id}")
+        if (home_id is not None) and (home_id != "None"):
+            home = Home.objects.filter(id=home_id).first()
+            if not home:
+                return None
+            queryset = queryset.filter(
+                home=home, played=False, text2speech_file__isnull=False
+            )
+
+        queryset = queryset.order_by("created_at")
+        item = queryset.first()
+        if item:
+            item.played = True
+            item.save()
+        if item:
+            return [item]
+        else:
+            return None
+
+    def list(self, request, *args, **kwargs):
+        queryset = self.get_queryset()
+        if queryset is None:
+            return Response(
+                {"message": "No text to speech found."},
+                status=status.HTTP_404_NOT_FOUND,
+            )
+
+        item = queryset[0]
+
+        s3_url = None
+        if item.text2speech_file is not None:
+            local_file = settings.AI_MEDIA_ROOT / item.text2speech_file.split("/")[-1]
+            logger.info(local_file)
+            if local_file.exists() and (
+                settings.STORAGE_SOLUTION == settings.STORAGE_SOLUTION_VOLUME
+                or settings.STORAGE_SOLUTION == settings.STORAGE_SOLUTION_LOCAL
+            ):
+                s3_client = settings.BOTO3_SESSION.client("s3")
+                s3_key = f"Responder/tts/{item.text2speech_file.split('/')[-1]}"
+                try:
+                    s3_client.upload_file(
+                        local_file,
+                        settings.S3_BUCKET,
+                        s3_key,
+                    )
+                except Exception as e:
+                    logger.error(e)
+                    logger.exception(e)
+                    # response with the HttpResponse
+            try:
+                s3_client = settings.BOTO3_SESSION.client("s3")
+                response = s3_client.generate_presigned_url(
+                    "get_object",
+                    Params={
+                        "Bucket": settings.S3_BUCKET,
+                        "Key": f"Responder/tts/{item.text2speech_file}",
+                    },
+                    ExpiresIn=3600,
+                )
+                s3_url = response
+            except Exception as e:
+                logger.error(e)
+        data = ResSpeechSerializer(item).data
+        data["tts_url"] = s3_url
+        logger.info(s3_url)
+        return Response(data, status=status.HTTP_200_OK)
+
+    @swagger_auto_schema(
+        operation_summary="Get speech audio s3 url",
+        operation_description="Get the text to speech audio s3 url",
+        responses={200: "The text to speech"},
+        tags=["hardware"],
+    )
+    @action(
+        detail=False,
+        methods=["post"],
+        url_path="get_text_to_speech",
+        url_name="get_text_to_speech",
+    )
+    def get_text_to_speech(self, request):
+        """Override the post method to add custom swagger documentation."""
+        text2speech_id = request.data.get("text2speech_id", None)
+        if text2speech_id is None:
+            return Response(
+                {"message": "text2speech_id is required."},
+                status=status.HTTP_400_BAD_REQUEST,
+            )
+        text2speech_obj = ResSpeech.objects.filter(id=text2speech_id).first()
+        if text2speech_obj is None:
+            return Response(
+                {"message": "No text to speech found."},
+                status=status.HTTP_404_NOT_FOUND,
+            )
+
+        s3_client = settings.BOTO3_SESSION.client("s3")
+        try:
+            response = s3_client.generate_presigned_url(
+                "get_object",
+                Params={
+                    "Bucket": settings.S3_BUCKET,
+                    "Key": f"tts/{text2speech_obj.text2speech_file}",
+                },
+                ExpiresIn=3600,
+            )
+
+            return Response({"tts_url": response}, status=status.HTTP_200_OK)
+        except Exception as e:
+            logger.error(e)
+            return Response(
+                {"message": str(e)}, status=status.HTTP_500_INTERNAL_SERVER_ERROR
+            )
+
+
+ + + +
+ + + + + + + + + +
+ + +

+ get_text_to_speech(request) + +

+ + +
+ +

Override the post method to add custom swagger documentation.

+ +
+ Source code in API/hardware/views.py +
@swagger_auto_schema(
+    operation_summary="Get speech audio s3 url",
+    operation_description="Get the text to speech audio s3 url",
+    responses={200: "The text to speech"},
+    tags=["hardware"],
+)
+@action(
+    detail=False,
+    methods=["post"],
+    url_path="get_text_to_speech",
+    url_name="get_text_to_speech",
+)
+def get_text_to_speech(self, request):
+    """Override the post method to add custom swagger documentation."""
+    text2speech_id = request.data.get("text2speech_id", None)
+    if text2speech_id is None:
+        return Response(
+            {"message": "text2speech_id is required."},
+            status=status.HTTP_400_BAD_REQUEST,
+        )
+    text2speech_obj = ResSpeech.objects.filter(id=text2speech_id).first()
+    if text2speech_obj is None:
+        return Response(
+            {"message": "No text to speech found."},
+            status=status.HTTP_404_NOT_FOUND,
+        )
+
+    s3_client = settings.BOTO3_SESSION.client("s3")
+    try:
+        response = s3_client.generate_presigned_url(
+            "get_object",
+            Params={
+                "Bucket": settings.S3_BUCKET,
+                "Key": f"tts/{text2speech_obj.text2speech_file}",
+            },
+            ExpiresIn=3600,
+        )
+
+        return Response({"tts_url": response}, status=status.HTTP_200_OK)
+    except Exception as e:
+        logger.error(e)
+        return Response(
+            {"message": str(e)}, status=status.HTTP_500_INTERNAL_SERVER_ERROR
+        )
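
A quick, hedged sketch of calling this action from a client. The host, route prefix, and token below are assumptions (check how the ViewSet is registered in API/hardware/urls.py); only text2speech_id comes from the view itself.

```python
# Hypothetical client call for the get_text_to_speech action.
# Assumptions: base URL, router prefix, and DRF token authentication.
import requests

API_BASE = "http://localhost:8000"                     # assumption
HEADERS = {"Authorization": "Token <your-api-token>"}  # assumption

resp = requests.post(
    f"{API_BASE}/hardware/speech/get_text_to_speech/",  # adjust to your router prefix
    headers=HEADERS,
    json={"text2speech_id": 1},
    timeout=30,
)
print(resp.status_code, resp.json())  # expect {"tts_url": "..."} on success
```

On success the returned tts_url is a presigned S3 link that expires after 3600 seconds, so fetch the audio promptly.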
+ list_files(request)
List all the files in the S3 bucket

+ +
+ Source code in API/hardware/views.py (lines 497-519)
@api_view(["GET"])
+@permission_classes([IsAuthenticated])
+def list_files(request):
+    """
+    List all the files in the S3 bucket
+    """
+    from_time = request.data.get("from_time", None)
+    logger.info(f"From time: {from_time}")
+    if from_time is None:
+        # default to 100 day ago
+        from_time = datetime.now() - timedelta(days=100)
+    else:
+        # get the from_time from the timestamp
+        from_time = datetime.fromtimestamp(float(from_time))
+
+    audio_files = DataAudio.objects.filter(created_at__gte=from_time)
+    video_files = DataVideo.objects.filter(created_at__gte=from_time)
+    audio_list_json = AudioDataSerializer(audio_files, many=True).data
+    video_list_json = VideoDataSerializer(video_files, many=True).data
+    return Response(
+        {"audio_files": audio_list_json, "video_files": video_list_json},
+        status=status.HTTP_200_OK,
+    )
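
A hedged usage sketch for this endpoint. The URL and token are assumptions; note that the view reads from_time (a Unix timestamp) from the request body even though the method is GET.

```python
# Hypothetical client call for list_files. URL and token are assumptions;
# "from_time" is a Unix timestamp read from the request body (the server
# defaults to 100 days ago when it is omitted).
import time
import requests

resp = requests.get(
    "http://localhost:8000/hardware/list_files/",         # adjust to your urls.py
    headers={"Authorization": "Token <your-api-token>"},
    json={"from_time": time.time() - 7 * 24 * 3600},       # last 7 days
    timeout=30,
)
payload = resp.json()
print(len(payload["audio_files"]), "audio files,", len(payload["video_files"]), "video files")
```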
+ upload_file(request)
This is a temporary solution: since we host the central server, we do not provide direct S3 access to general users.

So, to try out the system, you can use this endpoint to upload files to S3. It is intended for the Client and AI sides.

+ +
+ Source code in API/hardware/views.py (lines 455-494)
@api_view(["POST"])
+@permission_classes([IsAuthenticated])
+def upload_file(request):
+    """
+    This is a temporary solution: since we host the central server,
+    we do not provide direct S3 access to general users.
+
+    So, to try out the system, use this endpoint to upload files to S3.
+    It is intended for the Client and AI sides.
+
+    """
+    file = request.FILES.get("file")
+    if file is None:
+        return Response(
+            {"message": "No file found."},
+            status=status.HTTP_400_BAD_REQUEST,
+        )
+    s3_client = settings.BOTO3_SESSION.client("s3")
+    dest_path = request.data.get("dest_path", None)
+    if dest_path is None:
+        return Response(
+            {"message": "dest_path is required."},
+            status=status.HTTP_400_BAD_REQUEST,
+        )
+    try:
+        s3_client.upload_fileobj(
+            file,
+            settings.S3_BUCKET,
+            dest_path,
+        )
+        return Response(
+            {"message": "File uploaded successfully."},
+            status=status.HTTP_200_OK,
+        )
+    except Exception as e:
+        logger.error(e)
+        return Response(
+            {"message": str(e)},
+            status=status.HTTP_500_INTERNAL_SERVER_ERROR,
+        )
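
A hedged sketch of uploading through this endpoint with multipart form data. The URL and token are assumptions; file and dest_path are the two fields the view expects.

```python
# Hypothetical multipart upload via upload_file. URL and token are assumptions;
# "dest_path" becomes the S3 key under settings.S3_BUCKET.
import requests

with open("sample_audio.mp3", "rb") as fh:
    resp = requests.post(
        "http://localhost:8000/hardware/upload_file/",     # adjust to your urls.py
        headers={"Authorization": "Token <your-api-token>"},
        files={"file": fh},
        data={"dest_path": "Listener/audio/sample_audio.mp3"},
        timeout=120,
    )
print(resp.status_code, resp.json())
```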
diff --git a/Sources/API/llm/admin/index.html b/Sources/API/llm/admin/index.html
new file mode 100644
Admin

diff --git a/Sources/API/llm/apps/index.html b/Sources/API/llm/apps/index.html
new file mode 100644
Apps

diff --git a/Sources/API/llm/llm/config/index.html b/Sources/API/llm/llm/config/index.html
new file mode 100644
Config

diff --git a/Sources/API/llm/management/commands/check_models/index.html b/Sources/API/llm/management/commands/check_models/index.html
new file mode 100644
CheckModels

+ Command
+ Bases: BaseCommand

+ + +
+ Source code in API/llm/management/commands/check_models.py (lines 10-40)
class Command(BaseCommand):
+    help = "Check models and update the database"
+
+    def handle(self, *args, **options):
+        """
+        Loop through the MODELS dictionary and check if the model is in the database. If it is not, add it.
+        :param args:
+        :param options:
+        :return:
+        """
+
+        for model_families in MODELS:
+            model_family = model_families["name"]
+            model_type = model_families["model_type"]
+            for model_info in model_families["models"]:
+                if not LLMConfigRecords.objects.filter(
+                    model_name=model_info["name"]
+                ).exists():
+                    record = LLMConfigRecords(
+                        model_name=model_info["name"],
+                        model_size=model_info["size"],
+                        model_family=model_family,
+                        model_type=model_type,
+                        repo=model_info["repo"],
+                        filename=model_info["filename"],
+                        available=False,
+                    )
+                    record.save()
+                    logger.critical(f"Added {model_info['name']} to the database")
+                else:
+                    logger.critical(f"{model_info['name']} is already in the database")
+ handle(*args, **options)
Loop through the MODELS dictionary and check if the model is in the database. If it is not, add it. +:param args: +:param options: +:return:

+ +
+ Source code in API/llm/management/commands/check_models.py (lines 13-40)
def handle(self, *args, **options):
+    """
+    Loop through the MODELS dictionary and check if the model is in the database. If it is not, add it.
+    :param args:
+    :param options:
+    :return:
+    """
+
+    for model_families in MODELS:
+        model_family = model_families["name"]
+        model_type = model_families["model_type"]
+        for model_info in model_families["models"]:
+            if not LLMConfigRecords.objects.filter(
+                model_name=model_info["name"]
+            ).exists():
+                record = LLMConfigRecords(
+                    model_name=model_info["name"],
+                    model_size=model_info["size"],
+                    model_family=model_family,
+                    model_type=model_type,
+                    repo=model_info["repo"],
+                    filename=model_info["filename"],
+                    available=False,
+                )
+                record.save()
+                logger.critical(f"Added {model_info['name']} to the database")
+            else:
+                logger.critical(f"{model_info['name']} is already in the database")
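
The command is normally run as python API/manage.py check_models. It can also be invoked programmatically, for example from a deployment script, as sketched below (this assumes a configured Django environment).

```python
# Run the check_models command programmatically; requires Django settings to
# be loaded already (for example, inside `python API/manage.py shell`).
from django.core.management import call_command

call_command("check_models")
```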
diff --git a/Sources/API/llm/migrations/0001_init/index.html b/Sources/API/llm/migrations/0001_init/index.html
new file mode 100644
0001 init

diff --git a/Sources/API/llm/models/index.html b/Sources/API/llm/models/index.html
new file mode 100644
Models

+ LLMConfigRecords
+ Bases: Model

+ + +
+ Source code in API/llm/models.py (lines 13-84)
class LLMConfigRecords(models.Model):
+    model_name = models.CharField(max_length=100)
+    model_size = models.CharField(max_length=100)
+    model_family = models.CharField(max_length=100)
+    model_type = models.CharField(
+        max_length=100,
+        choices=[
+            ("hf", "HuggingFace"),
+            ("api", "API"),
+            ("llama.cpp", "llama.cpp"),
+            ("chatglm.cpp", "chatglm.cpp"),
+        ],
+        default="hf",
+    )
+    repo = models.CharField(max_length=100, blank=True, null=True)
+    filename = models.CharField(max_length=100, blank=True, null=True)
+    file_size = models.FloatField(blank=True, null=True)
+    available = models.BooleanField(default=False)
+    created_at = models.DateTimeField(auto_now_add=True)
+    updated_at = models.DateTimeField(auto_now=True)
+
+    def __str__(self):
+        return f"{self.model_name} - {self.created_at.strftime('%Y-%m-%d %H:%M:%S')}"
+
+    @property
+    def model_path(self):
+        return Path(
+            settings.BASE_DIR
+            / "llm"
+            / "llm_call"
+            / "models"
+            / self.model_family
+            / self.filename
+        )
+
+    def download_model(self):
+        """
+        Download the model from the model_details
+        :return:
+        """
+        download_url = hf_hub_url(repo_id=self.repo, filename=self.filename)
+        logger.critical(f"Downloading model from {download_url}")
+
+        model_general_folder = Path(
+            settings.BASE_DIR / "llm" / "llm_call" / "models" / self.model_family
+        )
+        logger.critical(f"Model folder {model_general_folder}")
+        model_general_folder.mkdir(parents=True, exist_ok=True)
+        filename = model_general_folder / self.filename
+
+        response = requests.get(download_url, stream=True)
+
+        # Total size in bytes.
+        total_size = int(response.headers.get("content-length", 0))
+        block_size = 1024  # 1 Kilobyte
+        logger.critical(f"Downloading {self.filename} to {model_general_folder}")
+        logger.critical(f"Total size: {total_size}")
+        progress_bar = tqdm(total=total_size, unit="iB", unit_scale=True)
+        with open(filename, "wb") as file:
+            for data in response.iter_content(block_size):
+                progress_bar.update(len(data))
+                file.write(data)
+        progress_bar.close()
+
+        if total_size != 0 and progress_bar.n != total_size:
+            logger.error("ERROR, something went wrong")
+            return False
+        return True
+
+    class Meta:
+        verbose_name = "LLM Config Record"
+        verbose_name_plural = "LLM Config Records"
+ download_model()
Download the model from the model_details +:return:

+ +
+ Source code in API/llm/models.py (lines 48-80)
def download_model(self):
+    """
+    Download the model from the model_details
+    :return:
+    """
+    download_url = hf_hub_url(repo_id=self.repo, filename=self.filename)
+    logger.critical(f"Downloading model from {download_url}")
+
+    model_general_folder = Path(
+        settings.BASE_DIR / "llm" / "llm_call" / "models" / self.model_family
+    )
+    logger.critical(f"Model folder {model_general_folder}")
+    model_general_folder.mkdir(parents=True, exist_ok=True)
+    filename = model_general_folder / self.filename
+
+    response = requests.get(download_url, stream=True)
+
+    # Total size in bytes.
+    total_size = int(response.headers.get("content-length", 0))
+    block_size = 1024  # 1 Kilobyte
+    logger.critical(f"Downloading {self.filename} to {model_general_folder}")
+    logger.critical(f"Total size: {total_size}")
+    progress_bar = tqdm(total=total_size, unit="iB", unit_scale=True)
+    with open(filename, "wb") as file:
+        for data in response.iter_content(block_size):
+            progress_bar.update(len(data))
+            file.write(data)
+    progress_bar.close()
+
+    if total_size != 0 and progress_bar.n != total_size:
+        logger.error("ERROR, something went wrong")
+        return False
+    return True
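
A hedged sketch of downloading the weights for one registered model from a Django shell. The import path is an assumption about the app layout; the field and method names come from the model above.

```python
# Hypothetical usage of LLMConfigRecords.download_model.
# The import path "llm.models" is an assumption about the Django app layout.
from llm.models import LLMConfigRecords

record = LLMConfigRecords.objects.filter(available=False).first()
if record is not None and record.download_model():
    record.available = True
    record.save()
    print(f"Downloaded {record.model_name} to {record.model_path}")
```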
diff --git a/Sources/API/llm/serializers/index.html b/Sources/API/llm/serializers/index.html
new file mode 100644
Serializers

diff --git a/Sources/API/llm/tests/index.html b/Sources/API/llm/tests/index.html
new file mode 100644
Tests

diff --git a/Sources/API/llm/urls/index.html b/Sources/API/llm/urls/index.html
new file mode 100644
URLs

diff --git a/Sources/API/llm/views/index.html b/Sources/API/llm/views/index.html
new file mode 100644
Views

+ LLMConfigViewSet
+ Bases: ModelViewSet

+ + +
+ Source code in API/llm/views.py (lines 14-31)
class LLMConfigViewSet(viewsets.ModelViewSet):
+    permission_classes = [IsAuthenticated]
+    serializer_class = LLMConfigRecordsSerializer
+    """
+    List all available llm config records
+    """
+    queryset = LLMConfigRecords.objects.all()
+
+    @swagger_auto_schema(
+        operation_summary="List LLM Model",
+        operation_description="Obtain the list of available LLM models and their status, need to have a token",
+        responses={200: LLMConfigRecordsSerializer(many=True)},
+        tags=["llm"],
+    )
+    @csrf_exempt
+    def list(self, request, *args, **kwargs):
+        """Override the post method to add custom swagger documentation."""
+        return super().list(request, *args, **kwargs)
+ serializer_class = LLMConfigRecordsSerializer (class attribute)
List all available llm config records

+ list(request, *args, **kwargs)
Override the post method to add custom swagger documentation.

+ +
+ Source code in API/llm/views.py (lines 22-31)
@swagger_auto_schema(
+    operation_summary="List LLM Model",
+    operation_description="Obtain the list of available LLM models and their status, need to have a token",
+    responses={200: LLMConfigRecordsSerializer(many=True)},
+    tags=["llm"],
+)
+@csrf_exempt
+def list(self, request, *args, **kwargs):
+    """Override the post method to add custom swagger documentation."""
+    return super().list(request, *args, **kwargs)
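
A hedged client sketch for listing the available LLM configs. The host, route prefix, and token are assumptions (check API/llm/urls.py); the printed fields follow the LLMConfigRecords model.

```python
# Hypothetical client call for the LLM config list endpoint.
# URL, prefix, and token are assumptions; fields follow LLMConfigRecords.
import requests

resp = requests.get(
    "http://localhost:8000/llm/config/",                  # adjust to your router prefix
    headers={"Authorization": "Token <your-api-token>"},
    timeout=30,
)
for item in resp.json():
    print(item["model_name"], item["model_type"], item["available"])
```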
diff --git a/Sources/API/manage/index.html b/Sources/API/manage/index.html
new file mode 100644
manage.md

Django's command-line utility for administrative tasks.

+ main()
Run administrative tasks.

+ +
+ Source code in API/manage.py (lines 7-18)
def main():
+    """Run administrative tasks."""
+    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "api.settings")
+    try:
+        from django.core.management import execute_from_command_line
+    except ImportError as exc:
+        raise ImportError(
+            "Couldn't import Django. Are you sure it's installed and "
+            "available on your PYTHONPATH environment variable? Did you "
+            "forget to activate a virtual environment?"
+        ) from exc
+    execute_from_command_line(sys.argv)
diff --git a/Sources/API/orchestrator/admin/index.html b/Sources/API/orchestrator/admin/index.html
new file mode 100644
Admin

diff --git a/Sources/API/orchestrator/apps/index.html b/Sources/API/orchestrator/apps/index.html
new file mode 100644
Apps

diff --git a/Sources/API/orchestrator/chain/clusters/index.html b/Sources/API/orchestrator/chain/clusters/index.html
new file mode 100644
Clusters

+ CLUSTER_GPT_35_ETE_CONVERSATION = {'openai_speech2text': {'order': 0, 'extra_params': {}, 'component_type': 'task', 'task_name': 'openai_speech2text'}, 'completed_openai_speech2text': {'order': 1, 'extra_params': {}, 'component_type': 'signal', 'task_name': None}, 'created_data_text': {'order': 2, 'extra_params': {}, 'component_type': 'signal', 'task_name': None}, 'completed_openai_gpt_35': {'order': 3, 'extra_params': {'sample_ratio': 10, 'prompt_template': '{text}'}, 'component_type': 'task', 'task_name': 'openai_gpt_35'}, 'completed_openai_text2speech': {'order': 4, 'extra_params': {}, 'component_type': 'task', 'task_name': 'openai_text2speech'}} + + + module-attribute + + +

+ + +
+ +

Cluster for the GPT-3.5 model and GPT-3.5 with RAG

+
+ +
+ +
+ + + +

+ CLUSTER_GPT_4O_TEXT_ETE_CONVERSATION = {'openai_speech2text': {'order': 0, 'extra_params': {}, 'component_type': 'task', 'task_name': 'openai_speech2text'}, 'completed_openai_speech2text': {'order': 1, 'extra_params': {}, 'component_type': 'signal', 'task_name': None}, 'created_data_text': {'order': 2, 'extra_params': {}, 'component_type': 'signal', 'task_name': None}, 'completed_openai_gpt_4o_text_only': {'order': 2, 'extra_params': {'sample_ratio': 10, 'prompt_template': '\n You are a robot, and you are talking to a human.\n\n Your task is to generate a response to the human based on the text\n\n You response will be directly send to end user.\n\n The text is: {text}\n '}, 'component_type': 'task', 'task_name': 'openai_gpt_4o_text_only'}, 'completed_openai_text2speech': {'order': 3, 'extra_params': {}, 'component_type': 'task', 'task_name': 'openai_text2speech'}} + + + module-attribute + + +

+ + +
+ +

Cluster for the GPT-3.5 model and GPT-3.5 with RAG

+
+ +
+ +
+ + + +

+ CLUSTER_HF_ETE_CONVERSATION = {'speech2text': {'order': 0, 'extra_params': {}, 'component_type': 'task', 'task_name': 'speech2text'}, 'completed_speech2text': {'order': 1, 'extra_params': {}, 'component_type': 'signal', 'task_name': 'None'}, 'created_data_text': {'order': 2, 'extra_params': {}, 'component_type': 'signal', 'task_name': None}, 'completed_emotion_detection': {'order': 3, 'extra_params': {}, 'component_type': 'task', 'task_name': 'emotion_detection'}, 'completed_hf_llm': {'order': 4, 'extra_params': {'hf_model_name': 'Qwen/Qwen2-7B-Instruct'}, 'component_type': 'task', 'task_name': 'hf_llm'}, 'completed_text2speech': {'order': 5, 'extra_params': {}, 'component_type': 'task', 'task_name': 'text2speech'}} + + + module-attribute + + +

+ + +
+ +

Create one to use the full GPT-4o models.

+

In theory, it should take the audio and video in, and then output audio.

+

However, until now, the API for audio is not yet available.

+

So we will work around this by using the speech-to-text model first, and then call GPT-4o

+
+ +
+ +
+ + + +

+ CLUSTER_Q_ETE_CONVERSATION = {'speech2text': {'order': 0, 'extra_params': {}, 'component_type': 'task', 'task_name': 'speech2text'}, 'completed_speech2text': {'order': 1, 'extra_params': {}, 'component_type': 'signal', 'task_name': None}, 'created_data_text': {'order': 2, 'extra_params': {}, 'component_type': 'signal', 'task_name': None}, 'completed_emotion_detection': {'order': 3, 'extra_params': {}, 'component_type': 'task', 'task_name': 'emotion_detection'}, 'completed_quantization_llm': {'order': 4, 'extra_params': {'llm_model_name': 'SOLAR-10'}, 'component_type': 'task', 'task_name': 'quantization_llm'}, 'completed_text2speech': {'order': 5, 'extra_params': {}, 'component_type': 'task', 'task_name': 'text2speech'}} + + + module-attribute + + +

+ + +
+ +

Get rid of the emotion detection model

+
+ +
+ +
+ + + +

+ CLUSTER_Q_NO_EMOTION_ETE_CONVERSATION = {'speech2text': {'order': 0, 'extra_params': {}, 'component_type': 'task', 'task_name': 'speech2text'}, 'completed_speech2text': {'order': 1, 'extra_params': {}, 'component_type': 'signal', 'task_name': None}, 'created_data_text': {'order': 2, 'extra_params': {}, 'component_type': 'signal', 'task_name': None}, 'completed_quantization_llm': {'order': 4, 'extra_params': {'llm_model_name': 'SOLAR-10'}, 'component_type': 'task', 'task_name': 'quantization_llm'}, 'completed_text2speech': {'order': 5, 'extra_params': {}, 'component_type': 'task', 'task_name': 'text2speech'}} + + + module-attribute + + +

+ + +
+ +

This is the pipeline using the HF LLM model for the ETE conversation

+
+ +
+ +
+ + + +

+ logger = get_logger(__name__) + + + module-attribute + + +

+ + +
+ +

This is for the quantization LLM model for the ETE conversation
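
Each cluster above maps a component name to its position in the pipeline through the order field. The snippet below is only an illustration of that data structure, not the actual ClusterManager logic, and the import path is an assumption.

```python
# Illustration of how the cluster dictionaries are ordered; this is not the
# ClusterManager implementation. The import path is an assumption.
from orchestrator.chain.clusters import CLUSTER_GPT_35_ETE_CONVERSATION


def next_component(cluster: dict, current: str):
    """Return the (name, config) pair that follows `current`, or None at the end."""
    ordered = sorted(cluster.items(), key=lambda item: item[1]["order"])
    names = [name for name, _ in ordered]
    idx = names.index(current)
    return ordered[idx + 1] if idx + 1 < len(ordered) else None


print(next_component(CLUSTER_GPT_35_ETE_CONVERSATION, "created_data_text"))
# -> ('completed_openai_gpt_35', {...})
```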

+
+ +
+ + + + + +
+ +
+ +
+ + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/Sources/API/orchestrator/chain/completed_emotion_detection/index.html b/Sources/API/orchestrator/chain/completed_emotion_detection/index.html new file mode 100644 index 00000000..005120b6 --- /dev/null +++ b/Sources/API/orchestrator/chain/completed_emotion_detection/index.html @@ -0,0 +1,4789 @@ + + + + + + + + + + + + + + + + + + + + + + + + + CompletedEmotionDetection - OpenOmni Framework by AI4WA + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

CompletedEmotionDetection

+ trigger_completed_emotion_detection(sender, **kwargs)
This will create a task to do the quantization LLM inference

+ +
+ Source code in API/orchestrator/chain/completed_emotion_detection.py (lines 14-70)
@receiver(completed_emotion_detection)
+def trigger_completed_emotion_detection(sender, **kwargs):
+    """
+    This will create a task to do the quantization LLM inference
+
+    """
+    try:
+        logger.info("Emotion detection completed triggerred")
+        data = kwargs.get("data", {})
+        track_id = kwargs.get("track_id", None)
+        logger.info(data)
+        task_data = TaskData(**data)
+
+        if track_id is None:
+            logger.error("No track_id found")
+            return
+        data_text_id = task_data.parameters.get("data_text_id", None)
+
+        # get the text and emotion from the result
+        text = task_data.parameters["text"]
+        emotion = task_data.result_json["result_profile"].get("multi_modal_output", {})
+        data_multimodal_conversation_log_context_emotion_detection(
+            task_data=task_data, result=emotion
+        )
+        emotion_text = (
+            "Emotion value is from -1 to 1, -1 means negative, 1 means positive\n"
+        )
+
+        for key, value in emotion.items():
+            if key == "A":
+                emotion_text += f"Audio emotion: {value}\n"
+            if key == "T":
+                emotion_text += f"Text emotion: {value}\n"
+            if key == "V":
+                emotion_text += f"Video emotion: {value}\n"
+            if key == "M":
+                emotion_text += f"Overall emotion: {value}\n"
+        prompt = f"""
+            You are a conversational AI.
+            Your friend said: {text}.
+            And his emotion is detected like this:
+            {emotion_text}
+
+            Respond to him.
+            Your response will directly send to him.
+
+            """
+
+        ClusterManager.chain_next(
+            track_id=track_id,
+            current_component="completed_emotion_detection",
+            next_component_params={"text": prompt, "data_text_id": data_text_id},
+            user=sender.user,
+        )
+
+    except Exception as e:
+        logger.exception(e)
+
diff --git a/Sources/API/orchestrator/chain/completed_hf_llm/index.html b/Sources/API/orchestrator/chain/completed_hf_llm/index.html
new file mode 100644
CompletedHFLLM

+ trigger_completed_hf_llm(sender, **kwargs)
This will create the response, which will be a text-to-text task. We will create the ResText here.

+ +
+ Source code in API/orchestrator/chain/completed_hf_llm.py (lines 12-45)
@receiver(completed_hf_llm)
+def trigger_completed_hf_llm(sender, **kwargs):  # noqa
+    """
+    This will create the response, which will be a text 2 text task
+    We will create the ResText here
+    """
+    try:
+        logger.info("HF LLM completed triggerred")
+        data = kwargs.get("data", {})
+        track_id = kwargs.get("track_id", None)
+        logger.info(data)
+        task_data = TaskData(**data)
+
+        if track_id is None:
+            logger.error("No track_id found")
+            return
+
+        text = task_data.result_json["result_profile"]["text"]
+        # grab the multi-modal conversation
+        data_multimodal_conversation_log_res_text(
+            task_data=task_data,
+            text=text,
+        )
+        data_text_id = task_data.parameters.get("data_text_id", None)
+        ClusterManager.chain_next(
+            track_id=track_id,
+            current_component="completed_hf_llm",
+            next_component_params={"text": text, "data_text_id": data_text_id},
+            user=sender.user,
+        )
+
+    except Exception as e:
+        logger.exception(e)
+        return
diff --git a/Sources/API/orchestrator/chain/completed_openai_gpt_35/index.html b/Sources/API/orchestrator/chain/completed_openai_gpt_35/index.html
new file mode 100644
CompletedOpenAIGPT35

+ trigger_completed_openai_gpt_35(sender, **kwargs)
This will create the response, which will be a text-to-text task.

+ +
+ Source code in API/orchestrator/chain/completed_openai_gpt_35.py (lines 12-44)
@receiver(completed_openai_gpt_35)
+def trigger_completed_openai_gpt_35(sender, **kwargs):  # noqa
+    """
+    This will create the response, which will be a text 2 text task
+    """
+    try:
+        logger.info("OpenAI GPT 35 LLM completed triggerred")
+        data = kwargs.get("data", {})
+        track_id = kwargs.get("track_id", None)
+        logger.info(data)
+        task_data = TaskData(**data)
+
+        if track_id is None:
+            logger.error("No track_id found")
+            return
+
+        text = task_data.result_json["result_profile"]["text"]
+        logger.info(text)
+        data_multimodal_conversation_log_res_text(
+            task_data=task_data,
+            text=text,
+        )
+        data_text_id = task_data.parameters.get("data_text_id", None)
+        ClusterManager.chain_next(
+            track_id=track_id,
+            current_component="completed_openai_gpt_35",
+            next_component_params={"text": text, "data_text_id": data_text_id},
+            user=sender.user,
+        )
+
+    except Exception as e:
+        logger.exception(e)
+        return
diff --git a/Sources/API/orchestrator/chain/completed_openai_gpt_4o_text_and_image/index.html b/Sources/API/orchestrator/chain/completed_openai_gpt_4o_text_and_image/index.html
new file mode 100644
CompletedOpenAIGPT4oTextAndImage

+ trigger_completed_openai_gpt_4o_text_and_image(sender, **kwargs)
This will create the response, which will be a text-to-text task.

+ +
+ Source code in API/orchestrator/chain/completed_openai_gpt_4o_text_and_image.py (lines 12-46)
@receiver(completed_openai_gpt_4o_text_and_image)
+def trigger_completed_openai_gpt_4o_text_and_image(sender, **kwargs):  # noqa
+    """
+    This will create the response, which will be a text 2 text task
+    """
+    try:
+        logger.info("OpenAI GPT 4o LLM completed triggerred")
+        data = kwargs.get("data", {})
+        track_id = kwargs.get("track_id", None)
+        logger.info(data)
+        task_data = TaskData(**data)
+
+        if track_id is None:
+            logger.error("No track_id found")
+            return
+
+        text = task_data.result_json["result_profile"]["text"]
+        logger.critical(text)
+        # grab the multi-modal conversation
+        data_multimodal_conversation_log_res_text(
+            task_data=task_data,
+            text=text,
+        )
+        data_text_id = task_data.parameters.get("data_text_id", None)
+
+        ClusterManager.chain_next(
+            track_id=track_id,
+            current_component="completed_openai_gpt_4o_text_and_image",
+            next_component_params={"text": text, "data_text_id": data_text_id},
+            user=sender.user,
+        )
+
+    except Exception as e:
+        logger.exception(e)
+        return
diff --git a/Sources/API/orchestrator/chain/completed_openai_gpt_4o_text_only/index.html b/Sources/API/orchestrator/chain/completed_openai_gpt_4o_text_only/index.html
new file mode 100644
CompletedOpenAIGPT4oTextOnly

+ trigger_completed_openai_gpt_4o_text_only(sender, **kwargs)
This will create the response, which will be a text-to-text task.

+ +
+ Source code in API/orchestrator/chain/completed_openai_gpt_4o_text_only.py (lines 12-44)
@receiver(completed_openai_gpt_4o_text_only)
+def trigger_completed_openai_gpt_4o_text_only(sender, **kwargs):  # noqa
+    """
+    This will create the response, which will be a text 2 text task
+    """
+    try:
+        logger.info("OpenAI GPT 4o LLM completed triggerred")
+        data = kwargs.get("data", {})
+        track_id = kwargs.get("track_id", None)
+        logger.info(data)
+        task_data = TaskData(**data)
+
+        if track_id is None:
+            logger.error("No track_id found")
+            return
+
+        text = task_data.result_json["result_profile"]["text"]
+        logger.info(text)
+        data_multimodal_conversation_log_res_text(
+            task_data=task_data,
+            text=text,
+        )
+        data_text_id = task_data.parameters.get("data_text_id", None)
+        ClusterManager.chain_next(
+            track_id=track_id,
+            current_component="completed_openai_gpt_4o_text_only",
+            next_component_params={"text": text, "data_text_id": data_text_id},
+            user=sender.user,
+        )
+
+    except Exception as e:
+        logger.exception(e)
+        return
+
diff --git a/Sources/API/orchestrator/chain/completed_openai_speech2text/index.html b/Sources/API/orchestrator/chain/completed_openai_speech2text/index.html
new file mode 100644
CompletedOpenAISpeech2Text

+ trigger_completed_openai_speech2text(sender, **kwargs)
We will need to gather the text, and then pass the video on to the next step

+ + +

Parameters:

- sender: The sender of the signal (required)
- **kwargs: The data passed to the signal (default: {})
Returns:

+ +
+ Source code in API/orchestrator/chain/completed_openai_speech2text.py (lines 12-77)
@receiver(completed_openai_speech2text)
+def trigger_completed_openai_speech2text(sender, **kwargs):
+    """
+    We will need to gather the text, and then grab the video to the next step
+
+    Args:
+        sender: The sender of the signal
+        **kwargs: The data passed to the signal
+
+    Returns:
+
+    """
+    logger.info("OpenAI Speech2Text completed triggerred")
+    data = kwargs.get("data", {})
+    track_id = kwargs.get("track_id", None)
+    task_data = TaskData(**data)
+    params = task_data.parameters
+    logger.info(track_id)
+
+    # get the text
+    result_json = task_data.result_json
+    result_profile = result_json.get("result_profile", {})
+    text = result_profile.get("text", "")
+    logger.info(text)
+
+    # Currently GPT-4o can only take images, so we will try to locate the relevant images
+    uid = params.get("uid")
+    home_id = params.get("home_id")
+    audio_index = params.get("audio_index")
+
+    audio = DataAudio.objects.filter(
+        home_id=home_id, uid=uid, sequence_index=audio_index
+    )
+
+    if not audio:
+        logger.error("Audio not found")
+        return
+    if len(audio) > 1:
+        logger.error("Multiple audio found")
+        return
+    audio_obj = audio.first()
+
+    data_text_obj = DataText.objects.filter(audio=audio_obj).first()
+    if data_text_obj:
+        data_text_obj.text = text
+        data_text_obj.save()
+    else:
+        data_text_obj = DataText(
+            audio=audio_obj,
+            text=text,
+        )
+        data_text_obj.save()
+
+    if not hasattr(audio_obj, "multi_modal_conversation"):
+        DataMultiModalConversation.objects.create(
+            audio=audio_obj,
+        )
+    audio_obj.multi_modal_conversation.text = data_text_obj
+    audio_obj.multi_modal_conversation.save()
+
+    ClusterManager.chain_next(
+        track_id=track_id,
+        current_component="completed_openai_speech2text",
+        next_component_params={"sender": data_text_obj, "data": data_text_obj.__dict__},
+        user=sender.user,
+    )
diff --git a/Sources/API/orchestrator/chain/completed_openai_text2speech/index.html b/Sources/API/orchestrator/chain/completed_openai_text2speech/index.html
new file mode 100644
CompletedOpenAIText2Speech

+ trigger_completed_openai_text2speech(sender, **kwargs)
After the text2speech is done, save it to the database

+ + +

Parameters:

- sender: The sender of the signal (required)
- kwargs: The data passed to the signal (default: {})
+ Source code in API/orchestrator/chain/completed_openai_text2speech.py (lines 13-55)
@receiver(completed_openai_text2speech)
+def trigger_completed_openai_text2speech(sender, **kwargs):
+    """
+    After the text2speech is done, save it to the database
+
+    Args:
+        sender: The sender of the signal
+        kwargs: The data passed to the signal
+    """
+    logger.info("OpenAI Text2Speech completed triggerred")
+    try:
+        data = kwargs.get("data", {})
+        track_id = kwargs.get("track_id", None)
+        logger.info(data)
+        task_data = TaskData(**data)
+
+        if track_id is None:
+            logger.error("No track_id found")
+            return
+        # get the speech2text task based on the track_id
+        speech2text_task = (
+            Task.objects.filter(track_id=track_id, task_name="openai_text2speech")
+            .order_by("-created_at")
+            .first()
+        )
+        if speech2text_task is None:
+            logger.error("No speech2text task found")
+            return
+        logger.info(speech2text_task.parameters)
+        text2speech_file = task_data.result_json["result_profile"].get(
+            "audio_file_path", ""
+        )
+        ResSpeech.objects.create(text2speech_file=text2speech_file)
+
+        # this is the end of the chain
+        data_multimodal_conversation_log_res_speech(
+            task_data=task_data,
+            speech_file_path=text2speech_file,
+        )
+
+    except Exception as e:
+        logger.exception(e)
+        return
diff --git a/Sources/API/orchestrator/chain/completed_quantization_llm/index.html b/Sources/API/orchestrator/chain/completed_quantization_llm/index.html
new file mode 100644
CompletedQuantizationLLM

+ trigger_completed_quantization_llm(sender, **kwargs)
This will create the response, which will be a text-to-text task, and we will need to log this ResText.

+ +
+ Source code in API/orchestrator/chain/completed_quantization_llm.py (lines 12-45)
@receiver(completed_quantization_llm)
+def trigger_completed_quantization_llm(sender, **kwargs):  # noqa
+    """
+    This will create the response, which will be a text 2 text task
+    And we will need to log this ResText
+    """
+    try:
+        logger.info("Quantization LLM completed triggerred")
+        data = kwargs.get("data", {})
+        track_id = kwargs.get("track_id", None)
+        logger.info(data)
+        task_data = TaskData(**data)
+
+        if track_id is None:
+            logger.error("No track_id found")
+            return
+
+        text = task_data.result_json["result_profile"]["text"]
+        # then we need to locate the conversation task
+        data_multimodal_conversation_log_res_text(
+            task_data=task_data,
+            text=text,
+        )
+        data_text_id = task_data.parameters.get("data_text_id", None)
+        ClusterManager.chain_next(
+            track_id=track_id,
+            current_component="completed_quantization_llm",
+            next_component_params={"text": text, "data_text_id": data_text_id},
+            user=sender.user,
+        )
+
+    except Exception as e:
+        logger.exception(e)
+        return
diff --git a/Sources/API/orchestrator/chain/completed_rag/index.html b/Sources/API/orchestrator/chain/completed_rag/index.html
new file mode 100644
CompletedRAG

+ trigger_completed_rag(sender, **kwargs)
This will create the response, which will be a text-to-text task, and we will need to log this ResText.

+ +
+ Source code in API/orchestrator/chain/completed_rag.py (lines 12-45)
@receiver(completed_rag)
+def trigger_completed_rag(sender, **kwargs):  # noqa
+    """
+    This will create the response, which will be a text 2 text task
+    And we will need to log this ResText
+    """
+    try:
+        logger.info("RAG completed triggerred")
+        data = kwargs.get("data", {})
+        track_id = kwargs.get("track_id", None)
+        logger.info(data)
+        task_data = TaskData(**data)
+
+        if track_id is None:
+            logger.error("No track_id found")
+            return
+
+        text = task_data.result_json["result_profile"]["text"]
+        # then we need to locate the conversation task
+        data_multimodal_conversation_log_context_rag(
+            task_data=task_data,
+            result=task_data.result_json["result_profile"],
+        )
+        data_text_id = task_data.parameters.get("data_text_id", None)
+        ClusterManager.chain_next(
+            track_id=track_id,
+            current_component="completed_rag",
+            next_component_params={"text": text, "data_text_id": data_text_id},
+            user=sender.user,
+        )
+
+    except Exception as e:
+        logger.exception(e)
+        return
diff --git a/Sources/API/orchestrator/chain/completed_speech2text/index.html b/Sources/API/orchestrator/chain/completed_speech2text/index.html
new file mode 100644
CompletedSpeech2Text

+ trigger_completed_speech2text(sender, **kwargs)
After the speech2text is done, save it to the database

+ + +

Parameters:

- sender: The sender of the signal (required)
- kwargs: The data passed to the signal (default: {})
+ Source code in API/orchestrator/chain/completed_speech2text.py (lines 12-68)
@receiver(completed_speech2text)
+def trigger_completed_speech2text(sender, **kwargs):
+    """
+    After the speech2text is done, save it to the database
+
+    Args:
+        sender: The sender of the signal
+        kwargs: The data passed to the signal
+    """
+    logger.info("Speech2Text completed triggerred")
+    data = kwargs.get("data", {})
+    track_id = kwargs.get("track_id", None)
+    task_data = TaskData(**data)
+    params = task_data.parameters
+    logger.info(track_id)
+
+    uid = params.get("uid")
+    home_id = params.get("home_id")
+    audio_index = params.get("audio_index")
+
+    audio = DataAudio.objects.filter(
+        home_id=home_id, uid=uid, sequence_index=audio_index
+    )
+    logger.debug(audio)
+    if not audio:
+        logger.error("Audio not found")
+        return
+    if len(audio) > 1:
+        logger.error("Multiple audio found")
+        return
+    audio_obj = audio.first()
+
+    # save the data to the database
+    result_json = task_data.result_json
+    result_profile = result_json.get("result_profile", {})
+    text = result_profile.get("text", "")
+    logger.debug(result_json)
+    data_text_obj = DataText.objects.filter(audio=audio_obj).first()
+    if data_text_obj:
+        data_text_obj.text = text
+        data_text_obj.save()
+    else:
+        data_text_obj = DataText(
+            audio=audio_obj,
+            text=text,
+        )
+        data_text_obj.save()
+
+    audio_obj.multi_modal_conversation.text = data_text_obj
+    audio_obj.multi_modal_conversation.save()
+
+    ClusterManager.chain_next(
+        track_id=track_id,
+        current_component="completed_speech2text",
+        next_component_params={"sender": data_text_obj, "data": data_text_obj.__dict__},
+        user=sender.user,
+    )
+
+
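For orientation, a minimal sketch of the kwargs this handler consumes when the completed_speech2text signal fires; the field values and the track_id format are illustrative stand-ins, not taken from a real run.

# Hypothetical signal payload; the keys mirror what trigger_completed_speech2text reads,
# the concrete values are made up for illustration.
example_kwargs = {
    "track_id": "T-example_cluster-0001",
    "data": {
        "task_name": "speech2text",
        "parameters": {"uid": "device-1", "home_id": 3, "audio_index": 42},
        "result_json": {"result_profile": {"text": "turn on the lights"}},
    },
}

# In the real code the Django signal would be sent roughly as:
# completed_speech2text.send(sender=task_instance, **example_kwargs)
print(example_kwargs["data"]["result_json"]["result_profile"]["text"])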
+
+ +
+ + + +
+ +
+ +
+ + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/Sources/API/orchestrator/chain/completed_task/index.html b/Sources/API/orchestrator/chain/completed_task/index.html new file mode 100644 index 00000000..5585e31a --- /dev/null +++ b/Sources/API/orchestrator/chain/completed_task/index.html @@ -0,0 +1,4821 @@ + + + + + + + + + + + + + + + + + + + + + + + + + CompletedTask - OpenOmni Framework by AI4WA + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

CompletedTask

+ +
+ + + + +
+ + + +
+ + + + + + + + + +
+ + +

+ trigger_completed_task(sender, **kwargs) + +

+ + +
+ +

Route the completed task to its task-specific completion signal.

+ +
+ Source code in API/orchestrator/chain/completed_task.py +
24
+25
+26
+27
+28
+29
+30
+31
+32
+33
+34
+35
+36
+37
+38
+39
+40
+41
+42
+43
+44
+45
+46
+47
+48
+49
+50
+51
+52
+53
+54
+55
+56
+57
+58
+59
+60
+61
+62
+63
+64
+65
+66
+67
+68
+69
+70
+71
+72
+73
+74
+75
+76
+77
+78
+79
+80
+81
+82
+83
+84
+85
+86
+87
+88
+89
+90
+91
+92
+93
+94
+95
+96
@receiver(completed_task)
+def trigger_completed_task(sender, **kwargs):
+    """
+    Route the completed task to its task-specific completion signal.
+    """
+    data = kwargs.get("data", {})
+    task_data = TaskData(**data)
+
+    if task_data.task_name == "speech2text":
+        return completed_speech2text.send(
+            sender=sender, data=data, track_id=task_data.track_id
+        )
+
+    if task_data.task_name == "emotion_detection":
+        return completed_emotion_detection.send(
+            sender=sender, data=data, track_id=task_data.track_id
+        )
+
+    if task_data.task_name == "quantization_llm":
+        return completed_quantization_llm.send(
+            sender=sender, data=data, track_id=task_data.track_id
+        )
+
+    if task_data.task_name == "text2speech":
+        logger.info("Text2Speech task completed")
+        return completed_text2speech.send(
+            sender=sender, data=data, track_id=task_data.track_id
+        )
+
+    if task_data.task_name == "hf_llm":
+        logger.info("HF LLM task completed")
+        return completed_hf_llm.send(
+            sender=sender, data=data, track_id=task_data.track_id
+        )
+
+    if task_data.task_name == "openai_speech2text":
+        logger.info("OpenAI Speech2Text task completed")
+        return completed_openai_speech2text.send(
+            sender=sender, data=data, track_id=task_data.track_id
+        )
+
+    if task_data.task_name == "openai_gpt_4o_text_and_image":
+        logger.info("OpenAI GPT4O task completed")
+        return completed_openai_gpt_4o_text_and_image.send(
+            sender=sender, data=data, track_id=task_data.track_id
+        )
+    if task_data.task_name == "openai_gpt_35":
+        logger.info("OpenAI GPT3.5 task completed")
+        return completed_openai_gpt_35.send(
+            sender=sender, data=data, track_id=task_data.track_id
+        )
+
+    if task_data.task_name == "openai_gpt_4o_text_only":
+        logger.info("OpenAI GPT4O Text Only task completed")
+        return completed_openai_gpt_4o_text_only.send(
+            sender=sender, data=data, track_id=task_data.track_id
+        )
+    if task_data.task_name == "rag":
+        logger.info("RAG task completed")
+        return completed_rag.send(sender=sender, data=data, track_id=task_data.track_id)
+
+    if task_data.task_name == "openai_text2speech":
+        logger.info("OpenAI Text2Speech task completed")
+        return completed_openai_text2speech.send(
+            sender=sender, data=data, track_id=task_data.track_id
+        )
+
+    task_name_choices = Task.get_task_name_choices()
+    task_name_choices_list = [task[0] for task in task_name_choices]
+    if task_data.task_name not in task_name_choices_list:
+        logger.error("Task name not found is not in the choices list")
+        return
+    logger.critical(f"{task_data.task_name} task completed, however, no action taken.")
+
+
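The routing above is a plain if-chain over task_name; the same behaviour can also be expressed as a dispatch table, which keeps the mapping in one place. A sketch with stub signal objects standing in for the Django signals imported in the real module:

# Sketch of a dispatch-table equivalent of the if-chain above.
# _StubSignal stands in for the Django Signal objects used in the real module.
class _StubSignal:
    def __init__(self, name: str):
        self.name = name

    def send(self, sender, **kwargs):
        print(f"sent {self.name} (track_id={kwargs.get('track_id')})")


COMPLETION_SIGNALS = {
    "speech2text": _StubSignal("completed_speech2text"),
    "emotion_detection": _StubSignal("completed_emotion_detection"),
    "quantization_llm": _StubSignal("completed_quantization_llm"),
    "text2speech": _StubSignal("completed_text2speech"),
    "hf_llm": _StubSignal("completed_hf_llm"),
    "rag": _StubSignal("completed_rag"),
    "openai_speech2text": _StubSignal("completed_openai_speech2text"),
    "openai_text2speech": _StubSignal("completed_openai_text2speech"),
}


def route_completed_task(sender, task_name: str, data: dict, track_id: str):
    signal = COMPLETION_SIGNALS.get(task_name)
    if signal is None:
        print(f"{task_name} task completed, however, no action taken")
        return None
    return signal.send(sender=sender, data=data, track_id=track_id)


route_completed_task(sender=None, task_name="rag", data={}, track_id="T-demo-1")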
+
+ +
+ + + +
+ +
+ +
+ + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/Sources/API/orchestrator/chain/completed_text2speech/index.html b/Sources/API/orchestrator/chain/completed_text2speech/index.html new file mode 100644 index 00000000..4b1b00d0 --- /dev/null +++ b/Sources/API/orchestrator/chain/completed_text2speech/index.html @@ -0,0 +1,4800 @@ + + + + + + + + + + + + + + + + + + + + + + + + + CompletedText2Speech - OpenOmni Framework by AI4WA + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

CompletedText2Speech

+ +
+ + + + +
+ + + +
+ + + + + + + + + +
+ + +

+ trigger_completed_text2speech(sender, **kwargs) + +

+ + +
+ +

After the text2speech is done, save it to the database

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
sender + +
+

The sender of the signal

+
+
+ required +
kwargs + +
+

The data passed to the signal

+
+
+ {} +
+ +
+ Source code in API/orchestrator/chain/completed_text2speech.py +
12
+13
+14
+15
+16
+17
+18
+19
+20
+21
+22
+23
+24
+25
+26
+27
+28
+29
+30
+31
+32
+33
+34
+35
+36
+37
+38
+39
+40
+41
+42
+43
+44
+45
+46
+47
+48
+49
+50
+51
+52
+53
@receiver(completed_text2speech)
+def trigger_completed_text2speech(sender, **kwargs):
+    """
+    After the text2speech is done, save it to the database
+
+    Args:
+        sender: The sender of the signal
+        kwargs: The data passed to the signal
+    """
+    logger.info("Text2Speech completed triggerred")
+    try:
+        data = kwargs.get("data", {})
+        track_id = kwargs.get("track_id", None)
+        logger.info(data)
+        task_data = TaskData(**data)
+
+        if track_id is None:
+            logger.error("No track_id found")
+            return
+        # get the speech2text task based on the track_id
+        speech2text_task = (
+            Task.objects.filter(track_id=track_id, task_name="speech2text")
+            .order_by("-created_at")
+            .first()
+        )
+        if speech2text_task is None:
+            logger.error("No speech2text task found")
+            return
+        text = speech2text_task.result_json["result_profile"].get("text", "")
+        logger.info(text)
+        logger.info(speech2text_task.parameters)
+        text2speech_file = task_data.result_json["result_profile"].get(
+            "audio_file_path", ""
+        )
+        data_multimodal_conversation_log_res_speech(
+            task_data=task_data,
+            speech_file_path=text2speech_file,
+        )
+
+    except Exception as e:
+        logger.exception(e)
+        return
+
+
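A small illustration of the result_json shape the handler above expects from the text2speech task; the file path is invented.

# Illustrative result_json; only result_profile.audio_file_path is read above.
text2speech_result_json = {
    "result_profile": {
        "audio_file_path": "tts/2024-08-05/response-0001.mp3",  # hypothetical path
    }
}

speech_file = text2speech_result_json["result_profile"].get("audio_file_path", "")
print(speech_file)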
+
+ +
+ + + +
+ +
+ +
+ + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/Sources/API/orchestrator/chain/created_data_text/index.html b/Sources/API/orchestrator/chain/created_data_text/index.html new file mode 100644 index 00000000..3d9472a7 --- /dev/null +++ b/Sources/API/orchestrator/chain/created_data_text/index.html @@ -0,0 +1,4879 @@ + + + + + + + + + + + + + + + + + + + + + + + + + CreatedDataText - OpenOmni Framework by AI4WA + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

CreatedDataText

+ +
+ + + + +
+ + + +
+ + + + + + + + + +
+ + +

+ trigger_created_data_text(sender, **kwargs) + +

+ + +
+ +

This function will trigger the emotion detection model with the latest data

+

It will first look for the latest data_text, and then get the audio and image data based on the time range of the audio data

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
sender + +
+

The sender of the signal

+
+
+ required +
kwargs + +
+

The data passed to the signal

+
+
+ {} +
+

Returns:

+ +
+ Source code in API/orchestrator/chain/created_data_text.py +
14
+15
+16
+17
+18
+19
+20
+21
+22
+23
+24
+25
+26
+27
+28
+29
+30
+31
+32
+33
+34
+35
+36
+37
+38
+39
+40
+41
+42
+43
+44
+45
+46
+47
+48
+49
+50
+51
+52
+53
+54
+55
+56
+57
+58
+59
+60
+61
+62
+63
+64
+65
+66
+67
+68
+69
+70
+71
+72
+73
+74
+75
+76
+77
+78
+79
+80
+81
+82
+83
+84
+85
+86
+87
+88
+89
+90
+91
+92
+93
@receiver(created_data_text)
+def trigger_created_data_text(sender, **kwargs):
+    """
+    This function will trigger the emotion detection model with the latest data
+
+    It will first look for the latest data_text,
+    and then get the audio and image data based on the time range of the audio data
+
+    Args:
+        sender: The sender of the signal
+        kwargs: The data passed to the signal
+    Returns:
+
+    """
+    data = kwargs.get("data", {})
+    track_id = kwargs.get("track_id", None)
+    data_text_id = data.get("id", None)
+    logger.debug(track_id)
+    # get the audio data which has not been processed and has the text information
+    if data_text_id:
+        data_text = DataText.objects.get(id=data_text_id)
+    else:
+        logger.error("No data_text_id found")
+        return None, None, None, None
+
+    text = data_text.text
+    data_audio = data_text.audio
+
+    audio_file = f"audio/{data_audio.uid}/{data_audio.audio_file}"
+
+    # get the image data based on the audio data time range
+    # TODO: this will be changed rapidly
+    start_time = data_audio.start_time
+    end_time = data_audio.end_time
+    # round the start to the minute level down
+    start_time = start_time.replace(second=0, microsecond=0)
+    # round the end to the minute level up
+    end_time = end_time.replace(second=0, microsecond=0) + timedelta(minutes=1)
+    logger.info(f"Start time: {start_time}, End time: {end_time}")
+    logger.info(data_audio)
+    # we will assume it comes from the same device
+    # list all videos has overlap with [start_time, end_time]
+    # get start_time and end_time has overlap with [start_time, end_time]
+    videos_data = DataVideo.objects.filter(
+        Q(start_time__lt=end_time, end_time__gt=start_time)
+    )
+
+    data_audio.multi_modal_conversation.video.add(*videos_data)
+    data_audio.multi_modal_conversation.save()
+    images_path = []
+    for video_data in videos_data:
+        image_folder_name = video_data.video_file.split(".")[0].rsplit("-", 1)[0]
+        images_path.append(f"{video_data.uid}/frames/{image_folder_name}")
+
+    # I need to read image files into List[np.ndarray]
+    images_path_list = []
+    for image_path in images_path:
+        # loop the path, get all images
+        folder = f"videos/{image_path}"
+        images_path_list.append(folder)
+
+    # trigger the model
+    logger.info(f"Text: {text}, Audio: {audio_file}, Images: {len(images_path_list)}")
+
+    task_params = {
+        "text": text,
+        "audio_file": audio_file,
+        "images_path_list": images_path_list,
+        "data_text_id": data_text.id,
+    }
+
+    user = kwargs.get("user", None)
+    ClusterManager.chain_next(
+        track_id=track_id,
+        current_component="created_data_text",
+        next_component_params=task_params,
+        user=user,
+    )
+
+    return text, [audio_file], images_path_list, data_text
+
+
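The minute-level rounding and the video overlap check above can be seen in isolation with plain datetimes; a self-contained sketch with invented timestamps:

from datetime import datetime, timedelta

# invented audio window
audio_start = datetime(2024, 8, 5, 15, 1, 42)
audio_end = datetime(2024, 8, 5, 15, 2, 10)

# round the start down and the end up to whole minutes, as in the handler
window_start = audio_start.replace(second=0, microsecond=0)
window_end = audio_end.replace(second=0, microsecond=0) + timedelta(minutes=1)


def overlaps(video_start: datetime, video_end: datetime) -> bool:
    # same condition as the Q(start_time__lt=end_time, end_time__gt=start_time) filter
    return video_start < window_end and video_end > window_start


print(window_start, window_end)
print(overlaps(datetime(2024, 8, 5, 15, 0, 0), datetime(2024, 8, 5, 15, 1, 30)))  # True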
+
+ +
+ + + +
+ +
+ +
+ + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/Sources/API/orchestrator/chain/manager/index.html b/Sources/API/orchestrator/chain/manager/index.html new file mode 100644 index 00000000..e16e600b --- /dev/null +++ b/Sources/API/orchestrator/chain/manager/index.html @@ -0,0 +1,5539 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Manager - OpenOmni Framework by AI4WA + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Manager

+ +
+ + + + +
+ +

Here will define a list of clusters

+

Each cluster will have a list of chain components

+

For example, the end-to-end conversation chain will have the following components (a configuration sketch follows the list):

+
  • completed_speech2text
  • created_data_text
  • completed_emotion_detection
  • completed_quantization_llm
  • completed_text2speech
+ + + +
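A configuration sketch of what one entry in the CLUSTERS registry could look like; the key names (order, component_type, task_name, extra_params) follow the accesses made by ClusterManager below, while the cluster name and the values are illustrative.

# Hypothetical cluster definition; only the field names are grounded in the
# ClusterManager code, the rest is made up for illustration.
EXAMPLE_CLUSTERS = {
    "example_ete_conversation": {
        "completed_speech2text": {"order": 0, "component_type": "task", "task_name": "speech2text"},
        "created_data_text": {"order": 1, "component_type": "signal"},
        "completed_emotion_detection": {"order": 2, "component_type": "task", "task_name": "emotion_detection"},
        "completed_quantization_llm": {
            "order": 3,
            "component_type": "task",
            "task_name": "quantization_llm",
            "extra_params": {"llm_model_name": "example-7b"},  # hypothetical extra parameter
        },
        "completed_text2speech": {"order": 4, "component_type": "task", "task_name": "text2speech"},
    }
}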
+ + + + + + + + +
+ + + +

+ ClusterManager + + +

+ + +
+ + +
+ Source code in API/orchestrator/chain/manager.py +
 26
+ 27
+ 28
+ 29
+ 30
+ 31
+ 32
+ 33
+ 34
+ 35
+ 36
+ 37
+ 38
+ 39
+ 40
+ 41
+ 42
+ 43
+ 44
+ 45
+ 46
+ 47
+ 48
+ 49
+ 50
+ 51
+ 52
+ 53
+ 54
+ 55
+ 56
+ 57
+ 58
+ 59
+ 60
+ 61
+ 62
+ 63
+ 64
+ 65
+ 66
+ 67
+ 68
+ 69
+ 70
+ 71
+ 72
+ 73
+ 74
+ 75
+ 76
+ 77
+ 78
+ 79
+ 80
+ 81
+ 82
+ 83
+ 84
+ 85
+ 86
+ 87
+ 88
+ 89
+ 90
+ 91
+ 92
+ 93
+ 94
+ 95
+ 96
+ 97
+ 98
+ 99
+100
+101
+102
+103
+104
+105
+106
+107
+108
+109
+110
+111
+112
+113
+114
+115
+116
+117
+118
+119
+120
+121
+122
+123
+124
+125
+126
+127
+128
+129
+130
+131
+132
+133
+134
+135
+136
+137
+138
+139
+140
class ClusterManager:
+
+    @staticmethod
+    def get_cluster(cluster_name: str):
+        """
+        Get the cluster
+
+        Args:
+            cluster_name (str): The cluster name
+        """
+        if cluster_name in CLUSTERS:
+            return CLUSTERS[cluster_name]
+        return None
+
+    @staticmethod
+    def get_next_chain_component(
+        cluster: dict, current_component: str
+    ) -> Tuple[Optional[str], Optional[dict]]:
+        """
+        Get the next chain
+
+        Args:
+            cluster (dict): The cluster
+            current_component (str): The current component
+
+        Return:
+            Tuple[Optional[str], Optional[dict]]: The next component and its parameters if they exist, otherwise None
+        """
+        chain = []
+        for key, value in cluster.items():
+            chain.append(key)
+        chain.sort(key=lambda x: cluster[x]["order"])
+        if current_component == "init":
+            """
+            If this is the start of the chain, then return the first component
+            """
+            return chain[0], cluster[chain[0]]
+        # index of the current component
+        current_component_index = chain.index(current_component)
+        next_index = current_component_index + 1
+        if next_index >= len(chain):
+            return None, None
+        return chain[next_index], cluster[chain[next_index]]
+
+    @classmethod
+    def get_next(cls, cluster_name: str, current_component: str):
+        """
+        Get the next component
+
+        Args:
+            cluster_name (str): The cluster name
+            current_component (str): The current component
+        """
+        cluster = cls.get_cluster(cluster_name)
+        if cluster is None:
+            return None
+        return ClusterManager.get_next_chain_component(cluster, current_component)
+
+    @classmethod
+    def chain_next(
+        cls,
+        track_id: Optional[str],
+        current_component: str,
+        next_component_params: dict,
+        name: str = None,
+        user=None,
+    ):
+        """
+        Chain to the next component
+
+        Args:
+            current_component (str): The current component
+            track_id (str): The track ID
+            next_component_params (dict): The next component parameters
+            name (str): The task name, it will be used to aggregate the task
+            user (None): The user
+        """
+        logger.info(f"Current component: {current_component}")
+        logger.info(f"Next component params: {next_component_params}")
+        cluster_name = track_id.split("-")[1]
+        next_component_name, next_component = cls.get_next(
+            cluster_name, current_component
+        )
+        logger.info(f"Next component: {next_component_name}")
+
+        if next_component_name is None:
+            return
+        # do something with the next component
+        # It can be a task or a signal
+        next_parameters = {
+            **next_component_params,
+            **next_component.get("extra_params", {}),
+        }
+        logger.info(next_parameters)
+        logger.info(next_component_name)
+
+        if next_component["component_type"] == "task":
+            task = Task.create_task(
+                user=user,
+                name=name or next_component["task_name"],
+                task_name=next_component["task_name"],
+                parameters=next_parameters,
+                track_id=track_id,
+            )
+            logger.info(f"Task {task.id} created for {next_component['task_name']}")
+            return task.id
+        elif next_component["component_type"] == "signal":
+            if next_component_name == "created_data_text":
+                created_data_text.send(
+                    sender=next_component_params.get("sender"),
+                    data=next_component_params.get("data"),
+                    track_id=track_id,
+                    user=user,
+                )
+        return None
+
+
+ + + +
+ + + + + + + + + +
+ + +

+ chain_next(track_id, current_component, next_component_params, name=None, user=None) + + + classmethod + + +

+ + +
+ +

Chain to the next component

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
current_component + str + +
+

The current component

+
+
+ required +
track_id + str + +
+

The track ID

+
+
+ required +
next_component_params + dict + +
+

The next component parameters

+
+
+ required +
name + str + +
+

The task name, it will be used to aggregate the task

+
+
+ None +
user + None + +
+

The user

+
+
+ None +
+ +
+ Source code in API/orchestrator/chain/manager.py +
 84
+ 85
+ 86
+ 87
+ 88
+ 89
+ 90
+ 91
+ 92
+ 93
+ 94
+ 95
+ 96
+ 97
+ 98
+ 99
+100
+101
+102
+103
+104
+105
+106
+107
+108
+109
+110
+111
+112
+113
+114
+115
+116
+117
+118
+119
+120
+121
+122
+123
+124
+125
+126
+127
+128
+129
+130
+131
+132
+133
+134
+135
+136
+137
+138
+139
+140
@classmethod
+def chain_next(
+    cls,
+    track_id: Optional[str],
+    current_component: str,
+    next_component_params: dict,
+    name: str = None,
+    user=None,
+):
+    """
+    Chain to the next component
+
+    Args:
+        current_component (str): The current component
+        track_id (str): The track ID
+        next_component_params (dict): The next component parameters
+        name (str): The task name, it will be used to aggregate the task
+        user (None): The user
+    """
+    logger.info(f"Current component: {current_component}")
+    logger.info(f"Next component params: {next_component_params}")
+    cluster_name = track_id.split("-")[1]
+    next_component_name, next_component = cls.get_next(
+        cluster_name, current_component
+    )
+    logger.info(f"Next component: {next_component_name}")
+
+    if next_component_name is None:
+        return
+    # do something with the next component
+    # It can be a task or a signal
+    next_parameters = {
+        **next_component_params,
+        **next_component.get("extra_params", {}),
+    }
+    logger.info(next_parameters)
+    logger.info(next_component_name)
+
+    if next_component["component_type"] == "task":
+        task = Task.create_task(
+            user=user,
+            name=name or next_component["task_name"],
+            task_name=next_component["task_name"],
+            parameters=next_parameters,
+            track_id=track_id,
+        )
+        logger.info(f"Task {task.id} created for {next_component['task_name']}")
+        return task.id
+    elif next_component["component_type"] == "signal":
+        if next_component_name == "created_data_text":
+            created_data_text.send(
+                sender=next_component_params.get("sender"),
+                data=next_component_params.get("data"),
+                track_id=track_id,
+                user=user,
+            )
+    return None
+
+
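For reference, a sketch of how a handler hands control to the next component, plus the track_id convention chain_next relies on (the second dash-separated field is the cluster name); the values are illustrative.

# Runnable fragment: how chain_next derives the cluster name from the track_id.
track_id = "T-example_ete_conversation-0001"  # invented track_id
cluster_name = track_id.split("-")[1]
print(cluster_name)  # example_ete_conversation

# In a handler, the hand-over would look roughly like this (not runnable outside Django):
# ClusterManager.chain_next(
#     track_id=track_id,
#     current_component="completed_speech2text",
#     next_component_params={"text": "turn on the lights", "data_text_id": 7},
#     user=None,
# )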
+
+ +
+ +
+ + +

+ get_cluster(cluster_name) + + + staticmethod + + +

+ + +
+ +

Get the cluster

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
cluster_name + str + +
+

The cluster name

+
+
+ required +
+ +
+ Source code in API/orchestrator/chain/manager.py +
28
+29
+30
+31
+32
+33
+34
+35
+36
+37
+38
@staticmethod
+def get_cluster(cluster_name: str):
+    """
+    Get the cluster
+
+    Args:
+        cluster_name (str): The cluster name
+    """
+    if cluster_name in CLUSTERS:
+        return CLUSTERS[cluster_name]
+    return None
+
+
+
+ +
+ +
+ + +

+ get_next(cluster_name, current_component) + + + classmethod + + +

+ + +
+ +

Get the next component

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
cluster_name + str + +
+

The cluster name

+
+
+ required +
current_component + str + +
+

The current component

+
+
+ required +
+ +
+ Source code in API/orchestrator/chain/manager.py +
70
+71
+72
+73
+74
+75
+76
+77
+78
+79
+80
+81
+82
@classmethod
+def get_next(cls, cluster_name: str, current_component: str):
+    """
+    Get the next component
+
+    Args:
+        cluster_name (str): The cluster name
+        current_component (str): The current component
+    """
+    cluster = cls.get_cluster(cluster_name)
+    if cluster is None:
+        return None
+    return ClusterManager.get_next_chain_component(cluster, current_component)
+
+
+
+ +
+ +
+ + +

+ get_next_chain_component(cluster, current_component) + + + staticmethod + + +

+ + +
+ +

Get the next chain

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
cluster + dict + +
+

The cluster

+
+
+ required +
current_component + str + +
+

The current component

+
+
+ required +
+ + +
+ Return +

Tuple[Optional[str], Optional[dict]]: The next component and its parameters if they exist, otherwise None

+
+
+ Source code in API/orchestrator/chain/manager.py +
40
+41
+42
+43
+44
+45
+46
+47
+48
+49
+50
+51
+52
+53
+54
+55
+56
+57
+58
+59
+60
+61
+62
+63
+64
+65
+66
+67
+68
@staticmethod
+def get_next_chain_component(
+    cluster: dict, current_component: str
+) -> Tuple[Optional[str], Optional[dict]]:
+    """
+    Get the next chain
+
+    Args:
+        cluster (dict): The cluster
+        current_component (str): The current component
+
+    Return:
+        Tuple[Optional[str], Optional[dict]]: The next component and its parameters if they exist, otherwise None
+    """
+    chain = []
+    for key, value in cluster.items():
+        chain.append(key)
+    chain.sort(key=lambda x: cluster[x]["order"])
+    if current_component == "init":
+        """
+        If this is the start of the chain, then return the first component
+        """
+        return chain[0], cluster[chain[0]]
+    # index of the current component
+    current_component_index = chain.index(current_component)
+    next_index = current_component_index + 1
+    if next_index >= len(chain):
+        return None, None
+    return chain[next_index], cluster[chain[next_index]]
+
+
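The ordering logic above is easy to exercise on its own; a self-contained replica using a toy cluster dict (component names reused from the chain, values invented):

from typing import Optional, Tuple


def get_next_chain_component(cluster: dict, current_component: str) -> Tuple[Optional[str], Optional[dict]]:
    # same ordering logic as the static method above
    chain = sorted(cluster, key=lambda name: cluster[name]["order"])
    if current_component == "init":
        return chain[0], cluster[chain[0]]
    next_index = chain.index(current_component) + 1
    if next_index >= len(chain):
        return None, None
    return chain[next_index], cluster[chain[next_index]]


toy_cluster = {
    "completed_speech2text": {"order": 0, "component_type": "task", "task_name": "speech2text"},
    "created_data_text": {"order": 1, "component_type": "signal"},
}
print(get_next_chain_component(toy_cluster, "init"))                   # first component
print(get_next_chain_component(toy_cluster, "completed_speech2text"))  # created_data_text
print(get_next_chain_component(toy_cluster, "created_data_text"))      # (None, None)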
+
+ +
+ + + +
+ +
+ +
+ + + + +
+ +
+ +
+ + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/Sources/API/orchestrator/chain/models/index.html b/Sources/API/orchestrator/chain/models/index.html new file mode 100644 index 00000000..0a9042a2 --- /dev/null +++ b/Sources/API/orchestrator/chain/models/index.html @@ -0,0 +1,4637 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Models - OpenOmni Framework by AI4WA + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Models

+ +
+ + + + +
+ + + +
+ + + + + + + + + + + +
+ +
+ +
+ + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/Sources/API/orchestrator/chain/signals/index.html b/Sources/API/orchestrator/chain/signals/index.html new file mode 100644 index 00000000..8e9a828c --- /dev/null +++ b/Sources/API/orchestrator/chain/signals/index.html @@ -0,0 +1,4637 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Signals - OpenOmni Framework by AI4WA + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Signals

+ +
+ + + + +
+ + + +
+ + + + + + + + + + + +
+ +
+ +
+ + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/Sources/API/orchestrator/chain/utils/index.html b/Sources/API/orchestrator/chain/utils/index.html new file mode 100644 index 00000000..7309a6de --- /dev/null +++ b/Sources/API/orchestrator/chain/utils/index.html @@ -0,0 +1,5153 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Utils - OpenOmni Framework by AI4WA + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + +

Utils

+ +
+ + + + +
+ + + +
+ + + + + + + + + +
+ + +

+ data_multimodal_conversation_log_context_emotion_detection(task_data, result, logs=None) + +

+ + +
+ + + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
task_data + TaskData + +
+

the task data

+
+
+ required +
result + dict + +
+

the result of the context emotion detection

+
+
+ required +
logs + dict + +
+

the logs of the context emotion detection

+
+
+ None +
+

Returns:

+ +
+ Source code in API/orchestrator/chain/utils.py +
52
+53
+54
+55
+56
+57
+58
+59
+60
+61
+62
+63
+64
+65
+66
+67
+68
+69
+70
+71
+72
+73
+74
+75
def data_multimodal_conversation_log_context_emotion_detection(
+    task_data: TaskData, result: dict, logs: dict = None
+):
+    """
+
+    Args:
+        task_data (TaskData): the task data
+        result (dict): the result of the context emotion detection
+        logs (dict): the logs of the context emotion detection
+
+    Returns:
+
+    """
+    data_text_id = task_data.parameters.get("data_text_id", None)
+    if data_text_id is not None:
+        data_text = DataText.objects.filter(id=data_text_id).first()
+        if data_text is not None and hasattr(data_text, "multi_modal_conversation"):
+            emotion = ContextEmotionDetection(
+                multi_modal_conversation=data_text.multi_modal_conversation,
+                result=result,
+                logs=logs,
+            )
+            emotion.save()
+            logger.info(emotion)
+
+
+
+ +
+ +
+ + +

+ data_multimodal_conversation_log_context_rag(task_data, result, logs=None) + +

+ + +
+ + + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
task_data + TaskData + +
+

the task data

+
+
+ required +
result + dict + +
+

the result of the context rag

+
+
+ required +
logs + dict + +
+

the logs of the context rag

+
+
+ None +
+

Returns:

+ +
+ Source code in API/orchestrator/chain/utils.py +
 78
+ 79
+ 80
+ 81
+ 82
+ 83
+ 84
+ 85
+ 86
+ 87
+ 88
+ 89
+ 90
+ 91
+ 92
+ 93
+ 94
+ 95
+ 96
+ 97
+ 98
+ 99
+100
+101
def data_multimodal_conversation_log_context_rag(
+    task_data: TaskData, result: dict, logs: dict = None
+):
+    """
+
+    Args:
+        task_data (TaskData): the task data
+        result (dict): the result of the context rag
+        logs (dict): the logs of the context rag
+
+    Returns:
+
+    """
+    data_text_id = task_data.parameters.get("data_text_id", None)
+    if data_text_id is not None:
+        data_text = DataText.objects.filter(id=data_text_id).first()
+        if data_text is not None and hasattr(data_text, "multi_modal_conversation"):
+            rag = ContextRAG(
+                multi_modal_conversation=data_text.multi_modal_conversation,
+                result=result,
+                logs=logs,
+            )
+            rag.save()
+            logger.info(rag)
+
+
+
+ +
+ +
+ + +

+ data_multimodal_conversation_log_res_speech(task_data, speech_file_path) + +

+ + +
+ + + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
task_data + TaskData + +
+

the task data

+
+
+ required +
speech_file_path + str + +
+

the speech file path

+
+
+ required +
+

Returns:

+ +
+ Source code in API/orchestrator/chain/utils.py +
31
+32
+33
+34
+35
+36
+37
+38
+39
+40
+41
+42
+43
+44
+45
+46
+47
+48
+49
def data_multimodal_conversation_log_res_speech(
+    task_data: TaskData, speech_file_path: str
+):
+    """
+
+    Args:
+        task_data (TaskData): the task data
+        speech_file_path (str): the speech file path
+
+    Returns:
+
+    """
+    res_speech = ResSpeech.objects.create(text2speech_file=speech_file_path)
+    data_text_id = task_data.parameters.get("data_text_id", None)
+    if data_text_id is not None:
+        data_text = DataText.objects.filter(id=data_text_id).first()
+        if data_text is not None and hasattr(data_text, "multi_modal_conversation"):
+            data_text.multi_modal_conversation.res_speech = res_speech
+            data_text.multi_modal_conversation.save()
+
+
+
+ +
+ +
+ + +

+ data_multimodal_conversation_log_res_text(task_data, text) + +

+ + +
+ +

Log the ResText to the DataMultiModalConversation

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
task_data + TaskData + +
+

The task data

+
+
+ required +
text + str + +
+

The text to log

+
+
+ required +
+ +
+ Source code in API/orchestrator/chain/utils.py +
14
+15
+16
+17
+18
+19
+20
+21
+22
+23
+24
+25
+26
+27
+28
def data_multimodal_conversation_log_res_text(task_data: TaskData, text: str):
+    """
+    Log the ResText to the DataMultiModalConversation
+
+    Args:
+        task_data (TaskData): The task data
+        text (str): The text to log
+    """
+    res_text = ResText.objects.create(text=text)
+    data_text_id = task_data.parameters.get("data_text_id", None)
+    if data_text_id is not None:
+        data_text = DataText.objects.filter(id=data_text_id).first()
+        if data_text is not None and hasattr(data_text, "multi_modal_conversation"):
+            data_text.multi_modal_conversation.res_text = res_text
+            data_text.multi_modal_conversation.save()
+
+
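These logging helpers are invoked from the chain handlers once a task finishes; a hedged illustration of the TaskData.parameters shape they rely on, where data_text_id is the key that links the result back to the conversation (values invented):

# Illustrative parameters dict as the utils above expect it; values are made up.
example_task_parameters = {
    "text": "turn on the lights",
    "data_text_id": 7,  # used to find the DataText and its multi_modal_conversation
}

data_text_id = example_task_parameters.get("data_text_id", None)
print(data_text_id)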
+
+ +
+ + + +
+ +
+ +
+ + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/Sources/API/orchestrator/metrics/accuracy_benchmark/index.html b/Sources/API/orchestrator/metrics/accuracy_benchmark/index.html new file mode 100644 index 00000000..341b06ab --- /dev/null +++ b/Sources/API/orchestrator/metrics/accuracy_benchmark/index.html @@ -0,0 +1,7816 @@ + + + + + + + + + + + + + + + + + + + + + + + + + AccuracyBenchmark - OpenOmni Framework by AI4WA + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + +

AccuracyBenchmark

+ +
+ + + + +
+ + + +
+ + + + + + + + +
+ + + +

+ AccuracyBenchmark + + +

+ + +
+ + +
+ Source code in API/orchestrator/metrics/accuracy_benchmark.py +
 20
+ 21
+ 22
+ 23
+ 24
+ 25
+ 26
+ 27
+ 28
+ 29
+ 30
+ 31
+ 32
+ 33
+ 34
+ 35
+ 36
+ 37
+ 38
+ 39
+ 40
+ 41
+ 42
+ 43
+ 44
+ 45
+ 46
+ 47
+ 48
+ 49
+ 50
+ 51
+ 52
+ 53
+ 54
+ 55
+ 56
+ 57
+ 58
+ 59
+ 60
+ 61
+ 62
+ 63
+ 64
+ 65
+ 66
+ 67
+ 68
+ 69
+ 70
+ 71
+ 72
+ 73
+ 74
+ 75
+ 76
+ 77
+ 78
+ 79
+ 80
+ 81
+ 82
+ 83
+ 84
+ 85
+ 86
+ 87
+ 88
+ 89
+ 90
+ 91
+ 92
+ 93
+ 94
+ 95
+ 96
+ 97
+ 98
+ 99
+100
+101
+102
+103
+104
+105
+106
+107
+108
+109
+110
+111
+112
+113
+114
+115
+116
+117
+118
+119
+120
+121
+122
+123
+124
+125
+126
+127
+128
+129
+130
+131
+132
+133
+134
+135
+136
+137
+138
+139
+140
+141
+142
+143
+144
+145
+146
+147
+148
+149
+150
+151
+152
+153
+154
+155
+156
+157
+158
+159
+160
+161
+162
+163
+164
+165
+166
+167
+168
+169
+170
+171
+172
+173
+174
+175
+176
+177
+178
+179
+180
+181
+182
+183
+184
+185
+186
+187
+188
+189
+190
+191
+192
+193
+194
+195
+196
+197
+198
+199
+200
+201
+202
+203
+204
+205
+206
+207
+208
+209
+210
+211
+212
+213
+214
+215
+216
+217
+218
+219
+220
+221
+222
+223
+224
+225
+226
+227
+228
+229
+230
+231
+232
+233
+234
+235
+236
+237
+238
+239
+240
+241
+242
+243
+244
+245
+246
+247
+248
+249
+250
+251
+252
+253
+254
+255
+256
+257
+258
+259
+260
+261
+262
+263
+264
+265
+266
+267
+268
+269
+270
+271
+272
+273
+274
+275
+276
+277
+278
+279
+280
+281
+282
+283
+284
+285
+286
+287
+288
+289
+290
+291
+292
+293
+294
+295
+296
+297
+298
+299
+300
+301
+302
+303
+304
+305
+306
+307
+308
+309
+310
+311
+312
+313
+314
+315
+316
+317
+318
+319
+320
+321
+322
+323
+324
+325
+326
+327
+328
+329
+330
+331
+332
+333
+334
+335
+336
+337
+338
+339
+340
+341
+342
+343
+344
+345
+346
+347
+348
+349
+350
+351
+352
+353
+354
+355
+356
+357
+358
+359
+360
+361
+362
+363
+364
+365
+366
+367
+368
+369
+370
+371
+372
+373
+374
+375
+376
+377
+378
+379
+380
+381
+382
+383
+384
+385
+386
+387
+388
+389
+390
+391
+392
+393
+394
+395
+396
+397
+398
+399
+400
+401
+402
+403
+404
+405
+406
+407
+408
+409
+410
+411
+412
+413
+414
+415
+416
+417
+418
+419
+420
+421
+422
+423
+424
+425
+426
+427
+428
+429
+430
+431
+432
+433
+434
+435
+436
+437
+438
+439
+440
+441
+442
+443
+444
+445
+446
+447
+448
+449
+450
+451
+452
+453
+454
+455
+456
+457
+458
+459
+460
+461
+462
+463
+464
+465
+466
+467
+468
+469
+470
+471
+472
+473
+474
+475
+476
+477
+478
+479
+480
+481
+482
+483
+484
+485
+486
+487
+488
+489
+490
+491
+492
+493
+494
+495
+496
+497
+498
+499
+500
+501
+502
+503
+504
+505
+506
+507
+508
+509
+510
+511
+512
+513
+514
+515
+516
+517
+518
+519
+520
+521
+522
+523
+524
+525
+526
+527
+528
+529
+530
+531
+532
+533
+534
+535
+536
+537
+538
+539
+540
+541
+542
+543
+544
+545
+546
+547
+548
+549
+550
+551
+552
+553
+554
+555
+556
+557
+558
+559
+560
+561
+562
+563
+564
+565
+566
+567
+568
+569
+570
+571
+572
+573
+574
+575
+576
+577
+578
+579
+580
+581
+582
+583
+584
+585
+586
+587
+588
+589
+590
+591
+592
+593
+594
+595
+596
+597
+598
+599
+600
+601
+602
+603
+604
+605
+606
+607
+608
+609
+610
+611
+612
+613
+614
+615
+616
+617
+618
+619
+620
+621
+622
+623
+624
+625
+626
+627
+628
+629
+630
+631
+632
+633
+634
+635
+636
+637
+638
+639
+640
+641
+642
+643
+644
+645
+646
+647
+648
+649
+650
+651
+652
class AccuracyBenchmark:
+    def __init__(self, benchmark_cluster: str = CLUSTER_Q_ETE_CONVERSATION_NAME):
+        """
+        Initialize the benchmark
+        Args:
+            benchmark_cluster (str): The benchmark cluster
+        """
+        # if it is a specific name, gather this metric; otherwise, report all existing clusters
+        self.benchmark_cluster = benchmark_cluster
+
+    def benchmark_run(self):
+        """
+        Run the benchmark
+        """
+        logger.info(f"Running accuracy benchmark for cluster {self.benchmark_cluster}")
+        # run the benchmark
+        html_content = ""
+        if self.benchmark_cluster == "all":
+            for cluster_name in CLUSTERS.keys():
+                html_content += "<hr>"
+                html_content += self.process_cluster_benchmark(
+                    cluster_name, detailed=False
+                )
+        else:
+            html_content += self.process_cluster_benchmark(
+                self.benchmark_cluster, detailed=False
+            )
+        return html_content
+
+    def process_cluster_benchmark(
+        self, cluster_name: str, detailed: bool = False
+    ) -> str:
+        """
+        Process the benchmark for a specific cluster
+
+        For each cluster, we will need to analyse the conversation model
+        and also understand which other models need to be analysed, for example the emotion_detection
+        Args:
+            cluster_name (str): The cluster name
+            detailed (bool): The detailed flag
+
+        Returns:
+            str: The HTML content
+        """
+        task_groups, required_tasks_count, tasks = extract_task_group(cluster_name)
+
+        required_annotation_task = self.extract_required_annotation_models(cluster_name)
+        logger.info(
+            f"Cluster: {cluster_name}, Required annotation tasks: {required_annotation_task}"
+        )
+        conversations = DataMultiModalConversation.objects.filter(
+            track_id__startswith=f"T-{cluster_name}-"
+        ).order_by("-created_at")
+
+        html_content = f"<h2>Cluster: {cluster_name}</h2>"
+        html_content += (
+            f"<p>Required tasks each group: {required_tasks_count} | "
+            f"Annotation task groups: {len(conversations)}</p>"
+        )
+
+        # the emotion and other context results also will be pulled from this one
+        # then we will load the annotation results according to this
+        # track id and annotation => flatten the results
+        annotations = []
+        annotation_expected_keys = MultiModalAnnotationForm.declared_fields.keys()
+        annotation_pending_default = {
+            key: "pending" for key in annotation_expected_keys
+        }
+
+        for conversation in conversations:
+            conversation_annotation = conversation.annotations
+            annotated = False
+            for user_id, annotation in conversation_annotation.items():
+                annotations.append(
+                    {
+                        "track_id": conversation.track_id,
+                        "user_id": user_id,
+                        "predict_text": conversation.text.text,
+                        **annotation_pending_default,
+                        **annotation,
+                    }
+                )
+                annotated = True
+            if not annotated:
+                annotations.append(
+                    {
+                        "track_id": conversation.track_id,
+                        "user_id": "missing",
+                        "predict_text": "",
+                        **annotation_pending_default,
+                    }
+                )
+
+        conversation_annotation_df = pd.DataFrame(annotations)
+        if len(conversation_annotation_df) == 0:
+            return html_content + "<p>No conversation annotation found</p>"
+        # transform the track_id to be the last part
+        conversation_annotation_df["track_id"] = (
+            conversation_annotation_df["track_id"].str.split("-").str[-1]
+        )
+        # replace all the column names, remove the annotation prefix
+        conversation_annotation_df.columns = [
+            col.replace("annotation_", "") for col in conversation_annotation_df.columns
+        ]
+        # add CER and WER
+        conversation_annotation_df = self.calculate_speech2text_accuracy(
+            conversation_annotation_df
+        )
+
+        if detailed:
+            # then we will present them into multiple tables: speech2text, text_generation, text2speech, overall
+            if "speech2text" in required_annotation_task:
+                speech2text_df = conversation_annotation_df[
+                    [
+                        "track_id",
+                        "user_id",
+                        "predict_text",
+                        "speech2text",
+                        "wer",
+                        "cer",
+                        "speech2text_score",
+                    ]
+                ].copy(deep=True)
+                html_content += self.plot_table(speech2text_df, "Speech2Text")
+            if "text_generation" in required_annotation_task:
+                text_generation_df = conversation_annotation_df[
+                    ["track_id", "user_id", "text_generation", "text_generation_score"]
+                ].copy(deep=True)
+                html_content += self.plot_table(text_generation_df, "Text Generation")
+            if "text2speech" in required_annotation_task:
+                text2speech_df = conversation_annotation_df[
+                    ["track_id", "user_id", "text2speech_score"]
+                ].copy(deep=True)
+                html_content += self.plot_table(text2speech_df, "Text2Speech")
+
+            overall_conversation_df = conversation_annotation_df[
+                ["track_id", "user_id", "overall_comment", "overall_score"]
+            ].copy(deep=True)
+            html_content += self.plot_table(
+                overall_conversation_df, "Overall Conversation Quality"
+            )
+        else:
+            #
+            # then we will try to calculate the overall accuracy for each annotation task
+            conversation_annotation_df = self.annotation_average(
+                df=conversation_annotation_df
+            )
+            if "speech2text" in required_annotation_task:
+                desc_df = self.summary_df(
+                    conversation_annotation_df[
+                        ["track_id", "wer", "cer", "speech2text_score"]
+                    ].copy(deep=True)
+                )
+                html_content += self.plot_table(desc_df, "Speech2Text Overall Quality")
+                html_content += self.plot_distribution(
+                    conversation_annotation_df[
+                        ["track_id", "wer", "cer", "speech2text_score"]
+                    ].copy(deep=True),
+                    "Speech2Text",
+                )
+            if "text_generation" in required_annotation_task:
+                desc_df = self.summary_df(
+                    conversation_annotation_df[
+                        ["track_id", "text_generation_score"]
+                    ].copy(deep=True)
+                )
+                html_content += self.plot_table(
+                    desc_df, "Text Generation Overall Quality"
+                )
+                html_content += self.plot_distribution(
+                    conversation_annotation_df[
+                        ["track_id", "text_generation_score"]
+                    ].copy(deep=True),
+                    "Text Generation",
+                )
+
+            if "text2speech" in required_annotation_task:
+                desc_df = self.summary_df(
+                    conversation_annotation_df[["track_id", "text2speech_score"]].copy(
+                        deep=True
+                    )
+                )
+                html_content += self.plot_table(desc_df, "Text2Speech Overall Quality")
+                html_content += self.plot_distribution(
+                    conversation_annotation_df[["track_id", "text2speech_score"]].copy(
+                        deep=True
+                    ),
+                    "Text2Speech",
+                )
+
+        # summary the emotion detection task
+        if "emotion_detection" in required_annotation_task:
+            # load the emotion detection results
+            emotion_detection_results = ContextEmotionDetection.objects.filter(
+                multi_modal_conversation__in=conversations
+            ).order_by("-created_at")
+            if len(emotion_detection_results) == 0:
+                return html_content + "<h4>No emotion detection results found</h4>"
+
+            emotion_detection_expected_keys = (
+                MultiModalFKEmotionDetectionAnnotationForm.declared_fields.keys()
+            )
+            emotion_detection_pending_default = {
+                key: "pending" for key in emotion_detection_expected_keys
+            }
+            emotion_detection_annotations = []
+            for emotion_detection in emotion_detection_results:
+                emotion_detection_annotation = emotion_detection.annotations
+                annotated = False
+                for user_id, annotation in emotion_detection_annotation.items():
+                    emotion_detection_annotations.append(
+                        {
+                            "track_id": emotion_detection.multi_modal_conversation.track_id,
+                            "user_id": user_id,
+                            **emotion_detection_pending_default,
+                            **annotation,
+                        }
+                    )
+                    annotated = True
+                if not annotated:
+                    emotion_detection_annotations.append(
+                        {
+                            "track_id": emotion_detection.multi_modal_conversation.track_id,
+                            "user_id": "missing",
+                            **emotion_detection_pending_default,
+                        }
+                    )
+
+            emotion_detection_df = pd.DataFrame(emotion_detection_annotations)
+            logger.info(emotion_detection_df)
+            emotion_detection_df["track_id"] = (
+                emotion_detection_df["track_id"].str.split("-").str[-1]
+            )
+            emotion_detection_df.columns = [
+                col.replace("annotation_", "") for col in emotion_detection_df.columns
+            ]
+            if detailed:
+                html_content += self.plot_table(
+                    emotion_detection_df, "Emotion Detection"
+                )
+
+            else:
+                emotion_detection_df = self.annotation_average(emotion_detection_df)
+                desc_df = self.summary_df(emotion_detection_df)
+                # logger.info(desc_df)
+                html_content += self.plot_table(desc_df, "Emotion Detection")
+                html_content += self.plot_distribution(
+                    emotion_detection_df, "Emotion Detection"
+                )
+        return html_content
+
+    @staticmethod
+    def plot_table(df: pd.DataFrame, title: str = "") -> str:
+        """
+        Plot the table
+        Args:
+            df (pd.DataFrame): The dataframe
+            title (str): The title
+
+        Returns:
+            str: The plot in HTML
+        """
+        colors = []
+        for col in df.columns:
+            col_colors = []
+            for val in df[col]:
+                if isinstance(val, float) or isinstance(val, int):
+                    col_colors.append("lavender")
+                else:
+                    if val == "missing":
+                        col_colors.append("lightcoral")
+                    elif val == "started":
+                        col_colors.append("lightyellow")
+                    elif val == "failed":
+                        col_colors.append("lightcoral")
+                    elif val == "pending":
+                        col_colors.append("lightblue")
+                    elif val == "incomplete":
+                        col_colors.append("lightgrey")
+                    else:
+                        col_colors.append("lightgreen")
+            colors.append(col_colors)
+        # Create a Plotly table
+        fig = go.Figure(
+            data=[
+                go.Table(
+                    header=dict(
+                        values=[
+                            (
+                                [f"<b>{c.upper()}</b>" for c in col]
+                                if isinstance(col, tuple)
+                                else f"<b>{col.upper()}</b>"
+                            )
+                            for col in df.columns
+                        ],
+                        fill_color="paleturquoise",
+                        align="left",
+                    ),
+                    cells=dict(
+                        values=[df[col] for col in df.columns],
+                        fill_color=colors,
+                        align="left",
+                    ),
+                )
+            ]
+        )
+        fig.update_layout(
+            title={
+                "text": f"Accuracy: {title}",
+                "x": 0.5,
+                "xanchor": "center",
+                "yanchor": "top",
+            },
+            #     update margin to be 0
+            margin=dict(l=10, r=10, b=0),
+            # get the height to be whatever it requires
+            height=max((len(df) * 35), 400),
+        )
+        # Update layout for better appearance
+        desc_html = fig.to_html(full_html=False)
+        return desc_html
+
+    @staticmethod
+    def plot_distribution(df: pd.DataFrame, title: str = "") -> str:
+        """
+        Plot the distribution of the latency
+        Args:
+            df (pd.DataFrame): The dataframe
+            title (str): The title
+
+        Returns:
+            str: The plot in HTML
+        """
+        # plot the distribution for each column
+        # Calculate mean and max for each latency column
+
+        mean_accuracies = df[df.columns[1:]].mean()
+        max_accuracies = df[df.columns[1:]].max()
+        min_accuracies = df[df.columns[1:]].min()
+
+        # Create a Plotly figure
+        fig = go.Figure()
+        # Add min latencies to the figure
+        fig.add_trace(
+            go.Bar(x=min_accuracies.index, y=min_accuracies.values, name="Min Accuracy")
+        )
+        # Add mean latencies to the figure
+        fig.add_trace(
+            go.Bar(
+                x=mean_accuracies.index, y=mean_accuracies.values, name="Mean Accuracy"
+            )
+        )
+
+        # Add max latencies to the figure
+        fig.add_trace(
+            go.Bar(x=max_accuracies.index, y=max_accuracies.values, name="Max Accuracy")
+        )
+
+        # Customize the layout
+        fig.update_layout(
+            title={
+                "text": "Accuracy Distribution" + title,
+                "x": 0.5,
+                "xanchor": "center",
+                "yanchor": "top",
+            },
+            xaxis_title="Evaluation Metrics",
+            yaxis_title="Accuracies",
+            barmode="group",
+            margin=dict(l=10, r=10, b=0),
+        )
+
+        # Convert Plotly figure to HTML
+        plot_html = fig.to_html(full_html=False)
+        return plot_html
+
+    @staticmethod
+    def extract_required_annotation_models(cluster_name: str) -> List[str]:
+        """
+        Extract the required annotation models
+        Args:
+            cluster_name (str): The cluster name
+        """
+        cluster = CLUSTERS.get(cluster_name, None)
+        if cluster is None:
+            raise ValueError(f"Cluster {cluster_name} not found")
+
+        # candidates include: speech2text, text_generation, text2speech; these are normally required
+        # others currently include emotion_detection
+        required_annotation_task = []
+        for item in cluster.values():
+            if item["component_type"] == "task":
+                task_name = item["task_name"]
+                required_annotation_task.append(
+                    Task.task_ml_task_mapping().get(task_name, None)
+                )
+
+        # filter out None
+        required_annotation_task = list(filter(None, required_annotation_task))
+        # remove the duplicate
+        return list(set(required_annotation_task))
+
+    @staticmethod
+    def calculate_speech2text_accuracy(df: pd.DataFrame) -> pd.DataFrame:
+        """
+        Calculate the speech2text accuracy
+        Args:
+            df (pd.DataFrame): The dataframe
+
+        Returns:
+            float: The accuracy
+        """
+        # both predict_text and speech2text can be null
+        # if the predict_text is null, then we will consider it as 0
+        # if the speech2text is null, then we will consider it as 0
+        df["speech2text"] = df["speech2text"].fillna("")
+        df["predict_text"] = df["predict_text"].fillna("")
+        # calculate the accuracy
+        df["wer"] = df.apply(
+            lambda x: (
+                round(
+                    jiwer.wer(
+                        x["speech2text"],
+                        x["predict_text"],
+                    ),
+                    2,
+                )
+                if len(x["speech2text"]) > 0
+                else 0
+            ),
+            axis=1,
+        )
+
+        df["cer"] = df.apply(
+            lambda x: (
+                round(
+                    jiwer.cer(
+                        x["speech2text"],
+                        x["predict_text"],
+                    ),
+                    2,
+                )
+                if len(x["speech2text"]) > 0
+                else 0
+            ),
+            axis=1,
+        )
+
+        return df
+
+    @staticmethod
+    def annotation_average(df: pd.DataFrame) -> pd.DataFrame:
+        """
+        Calculate the average of the annotation
+        Args:
+            df (pd.DataFrame): The dataframe
+
+        Returns:
+            pd.DataFrame: The dataframe
+        """
+        # for each row, if the value is missing or pending, remove the row
+        # then calculate the average for each track_id
+        df = df.replace("missing", pd.NA)
+        df = df.replace("pending", pd.NA)
+        df = df.dropna(subset=df.columns[2:], how="any")
+        # try to get all columns to float, if not possible, then keep it as it is
+        # loop the columns, try to get it to float
+        for col in df.columns[2:]:
+            try:
+                df[col] = df[col].astype(float)
+            except ValueError:
+                pass
+        numeric_columns = df.select_dtypes(include=["float64", "int64"]).columns
+        df_mean = df.groupby("track_id")[numeric_columns].mean().reset_index()
+        return df_mean
+
+    @staticmethod
+    def summary_df(df: pd.DataFrame) -> pd.DataFrame:
+        """
+        Summary the given dataframe
+
+        Args:
+            df (pd.DataFrame): The dataframe
+
+        Returns:
+            str: The HTML content
+        """
+        # for the same track_id, aggregate the results into one, and use the mean as the final result
+        # df = df.apply(pd.to_numeric, errors='coerce')
+
+        # Group by 'track_id' and calculate the mean for each group
+        # df = df.groupby("track_id").mean().reset_index()
+        desc_df = df.describe().transpose()
+        desc_df = desc_df.reset_index()
+        desc_df.rename(columns={"index": "metric"}, inplace=True)
+        desc_df = desc_df.round(4)
+        return desc_df
+
+    def detail_run(self):
+        logger.info(f"Running accuracy benchmark for cluster {self.benchmark_cluster}")
+        # run the benchmark
+        html_content = ""
+        if self.benchmark_cluster == "all":
+            for cluster_name in CLUSTERS.keys():
+                html_content += "<hr>"
+                html_content += self.process_cluster_benchmark(
+                    cluster_name, detailed=True
+                )
+        else:
+            html_content += self.process_cluster_benchmark(
+                self.benchmark_cluster, detailed=True
+            )
+        return html_content
+
+    def multi_turn_benchmark_run(self):
+        """
+        Run the multi-turn benchmark
+        Returns:
+
+        """
+        logger.info(
+            f"Running multi-turn benchmark for cluster {self.benchmark_cluster}"
+        )
+        # run the benchmark
+        html_content = ""
+        if self.benchmark_cluster == "all":
+            for cluster_name in CLUSTERS.keys():
+                html_content += "<hr>"
+                html_content += self.process_multi_turn_benchmark(cluster_name)
+        else:
+            html_content += self.process_multi_turn_benchmark(self.benchmark_cluster)
+        return html_content
+
+    def process_multi_turn_benchmark(self, cluster_name: str) -> str:
+        """
+        Process the multi-turn benchmark
+
+        First we will need to get all tags with this cluster name, and grab the last conversation within each tag
+        Args:
+            cluster_name (str): The cluster name
+        Returns:
+            str: The HTML content
+        """
+        conversations = DataMultiModalConversation.objects.filter(
+            track_id__startswith=f"T-{cluster_name}-"
+        )
+
+        # grab all tags
+
+        tags = []
+        for conversation in conversations:
+            for tag in conversation.tags.all():
+                tags.append(tag.name)
+
+        tags = list(set(tags))
+
+        tag_last_conversations = []
+        for tag in tags:
+            last_conversation = (
+                conversations.filter(tags__name=tag).order_by("-created_at").first()
+            )
+            tag_last_conversations.append(last_conversation)
+
+        html_content = f"<h2>Cluster: {cluster_name}</h2>"
+        html_content += (
+            f"<p>Multi-Turn Conversation Count: {len(tag_last_conversations)}</p>"
+        )
+        # then we will need to analyse the conversation model
+        multi_turn_annotations = []
+        # get all possible keys from the annotation form
+        multi_turn_annotations_expected_keys = []
+        for conversation in tag_last_conversations:
+            conversation_annotation = conversation.multi_turns_annotations
+            for user_id, annotation in conversation_annotation.items():
+                multi_turn_annotations_expected_keys.extend(annotation.keys())
+        multi_turn_annotations_expected_keys = list(
+            set(multi_turn_annotations_expected_keys)
+        )
+
+        if "multi_turn_annotation_overall" not in multi_turn_annotations_expected_keys:
+            multi_turn_annotations_expected_keys.append("multi_turn_annotation_overall")
+        if (
+            "multi_turn_annotation_overall_comment"
+            not in multi_turn_annotations_expected_keys
+        ):
+            multi_turn_annotations_expected_keys.append(
+                "multi_turn_annotation_overall_comment"
+            )
+
+        multi_turn_annotations_pending_default = {
+            key: "pending" for key in multi_turn_annotations_expected_keys
+        }
+
+        for conversation in tag_last_conversations:
+            conversation_annotation = conversation.multi_turns_annotations
+            annotated = False
+            for user_id, annotation in conversation_annotation.items():
+                multi_turn_annotations.append(
+                    {
+                        "track_id": conversation.track_id,
+                        "user_id": user_id,
+                        **multi_turn_annotations_pending_default,
+                        **annotation,
+                    }
+                )
+                annotated = True
+            if not annotated:
+                multi_turn_annotations.append(
+                    {
+                        "track_id": conversation.track_id,
+                        "user_id": "missing",
+                        **multi_turn_annotations_pending_default,
+                    }
+                )
+
+        multi_turn_annotation_df = pd.DataFrame(multi_turn_annotations)
+
+        if len(multi_turn_annotation_df) == 0:
+            return html_content + "<p>No multi-turn conversation annotation found</p>"
+        # transform the track_id to be the last part
+        multi_turn_annotation_df["track_id"] = (
+            multi_turn_annotation_df["track_id"].str.split("-").str[-1]
+        )
+        # replace all the column names, remove the annotation prefix
+        multi_turn_annotation_df.columns = [
+            col.replace("multi_turn_annotation_", "")
+            for col in multi_turn_annotation_df.columns
+        ]
+
+        html_content += self.plot_table(
+            multi_turn_annotation_df, "Multi-Turn Conversation"
+        )
+        return html_content
+
+
+ __init__(benchmark_cluster=CLUSTER_Q_ETE_CONVERSATION_NAME)
+
+ Initialize the benchmark
+ Args:
+     benchmark_cluster (str): The benchmark cluster
+
+ Source code in API/orchestrator/metrics/accuracy_benchmark.py
def __init__(self, benchmark_cluster: str = CLUSTER_Q_ETE_CONVERSATION_NAME):
+    """
+    Initialize the benchmark
+    Args:
+        benchmark_cluster (str): The benchmark cluster
+    """
+    # if it is a specific name, gather this metric, otherwise, report all existing cluster
+    self.benchmark_cluster = benchmark_cluster
+
+
+
+ annotation_average(df) (staticmethod)
+
+ Calculate the average of the annotation
+ Args:
+     df (pd.DataFrame): The dataframe
+
+ Returns:
+     pd.DataFrame: The dataframe
+
+ Source code in API/orchestrator/metrics/accuracy_benchmark.py
@staticmethod
+def annotation_average(df: pd.DataFrame) -> pd.DataFrame:
+    """
+    Calculate the average of the annotation
+    Args:
+        df (pd.DataFrame): The dataframe
+
+    Returns:
+        pd.DataFrame: The dataframe
+    """
+    # for each row, if the value is missing or pending, remove the row
+    # then calculate the average for each track_id
+    df = df.replace("missing", pd.NA)
+    df = df.replace("pending", pd.NA)
+    df = df.dropna(subset=df.columns[2:], how="any")
+    # try to get all columns to float, if not possible, then keep it as it is
+    # loop the columns, try to get it to float
+    for col in df.columns[2:]:
+        try:
+            df[col] = df[col].astype(float)
+        except ValueError:
+            pass
+    numeric_columns = df.select_dtypes(include=["float64", "int64"]).columns
+    df_mean = df.groupby("track_id")[numeric_columns].mean().reset_index()
+    return df_mean
+
+
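+
+ As a minimal standalone sketch of the averaging logic above (hypothetical annotation scores; it only assumes pandas, and that the first two columns are track_id and user_id as in the dataframes built elsewhere on this page):
+
+import pandas as pd
+
+df = pd.DataFrame(
+    {
+        "track_id": ["001", "001", "002"],
+        "user_id": ["u1", "u2", "u1"],
+        "speech2text_score": [4, 5, "pending"],  # unannotated rows carry the string marker
+    }
+)
+df = df.replace({"missing": pd.NA, "pending": pd.NA})
+df = df.dropna(subset=df.columns[2:], how="any")  # drop rows that are still pending/missing
+df["speech2text_score"] = df["speech2text_score"].astype(float)
+print(df.groupby("track_id")["speech2text_score"].mean())  # track 001 -> 4.5
+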
+ benchmark_run()
+
+ Run the benchmark
+
+ Source code in API/orchestrator/metrics/accuracy_benchmark.py
def benchmark_run(self):
+    """
+    Run the benchmark
+    """
+    logger.info(f"Running accuracy benchmark for cluster {self.benchmark_cluster}")
+    # run the benchmark
+    html_content = ""
+    if self.benchmark_cluster == "all":
+        for cluster_name in CLUSTERS.keys():
+            html_content += "<hr>"
+            html_content += self.process_cluster_benchmark(
+                cluster_name, detailed=False
+            )
+    else:
+        html_content += self.process_cluster_benchmark(
+            self.benchmark_cluster, detailed=False
+        )
+    return html_content
+
+
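+
+ As a possible driver for these reports, a usage sketch only: the import path and the class name AccuracyBenchmark below are assumptions inferred from this module's file path, not confirmed by this page.
+
+# Assumed import; adjust to the class actually exported by accuracy_benchmark.py
+from orchestrator.metrics.accuracy_benchmark import AccuracyBenchmark
+
+benchmark = AccuracyBenchmark(benchmark_cluster="all")  # or a single cluster name
+report_html = benchmark.benchmark_run()            # aggregated accuracy summary
+report_html += benchmark.detail_run()              # per-annotation detail tables
+report_html += benchmark.multi_turn_benchmark_run()
+with open("accuracy_report.html", "w") as f:
+    f.write(report_html)
+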
+ calculate_speech2text_accuracy(df) (staticmethod)
+
+ Calculate the speech2text accuracy
+ Args:
+     df (pd.DataFrame): The dataframe
+
+ Returns:
+     pd.DataFrame: The dataframe with added wer and cer columns
+
+ Source code in API/orchestrator/metrics/accuracy_benchmark.py
@staticmethod
+def calculate_speech2text_accuracy(df: pd.DataFrame) -> pd.DataFrame:
+    """
+    Calculate the speech2text accuracy
+    Args:
+        df (pd.DataFrame): The dataframe
+
+    Returns:
+        pd.DataFrame: The dataframe with added wer and cer columns
+    """
+    # both predict_text and speech2text can be null
+    # if the predict_text is null, then we will consider it as 0
+    # if the speech2text is null, then we will consider it as 0
+    df["speech2text"] = df["speech2text"].fillna("")
+    df["predict_text"] = df["predict_text"].fillna("")
+    # calculate the accuracy
+    df["wer"] = df.apply(
+        lambda x: (
+            round(
+                jiwer.wer(
+                    x["speech2text"],
+                    x["predict_text"],
+                ),
+                2,
+            )
+            if len(x["speech2text"]) > 0
+            else 0
+        ),
+        axis=1,
+    )
+
+    df["cer"] = df.apply(
+        lambda x: (
+            round(
+                jiwer.cer(
+                    x["speech2text"],
+                    x["predict_text"],
+                ),
+                2,
+            )
+            if len(x["speech2text"]) > 0
+            else 0
+        ),
+        axis=1,
+    )
+
+    return df
+
+
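+
+ To make the WER/CER computation above concrete, a minimal sketch with illustrative strings (it only assumes the jiwer package already used in this module):
+
+import jiwer
+
+reference = "turn on the living room lights"  # human transcription (speech2text annotation)
+hypothesis = "turn on living room light"      # model output (predict_text)
+
+# Word Error Rate and Character Error Rate, rounded as in the benchmark above
+wer = round(jiwer.wer(reference, hypothesis), 2)
+cer = round(jiwer.cer(reference, hypothesis), 2)
+print(wer, cer)  # lower is better; 0.0 means a perfect match
+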
+ extract_required_annotation_models(cluster_name) (staticmethod)
+
+ Extract the required annotation models
+ Args:
+     cluster_name (str): The cluster name
+
+ Source code in API/orchestrator/metrics/accuracy_benchmark.py
@staticmethod
+def extract_required_annotation_models(cluster_name: str) -> List[str]:
+    """
+    Extract the required annotation models
+    Args:
+        cluster_name (str): The cluster name
+    """
+    cluster = CLUSTERS.get(cluster_name, None)
+    if cluster is None:
+        raise ValueError(f"Cluster {cluster_name} not found")
+
+    # candidate included: speech2text, text_generation, text2speech, this normally is required
+    # other include emotion_detection now
+    required_annotation_task = []
+    for item in cluster.values():
+        if item["component_type"] == "task":
+            task_name = item["task_name"]
+            required_annotation_task.append(
+                Task.task_ml_task_mapping().get(task_name, None)
+            )
+
+    # filter out None
+    required_annotation_task = list(filter(None, required_annotation_task))
+    # remove the duplicate
+    return list(set(required_annotation_task))
+
+
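+
+ The final mapping step above reduces to dropping None results and de-duplicating; a tiny illustration with hypothetical task names:
+
+mapped_tasks = ["speech2text", None, "text_generation", "speech2text"]
+required = list(set(filter(None, mapped_tasks)))
+print(sorted(required))  # ['speech2text', 'text_generation']
+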
+ multi_turn_benchmark_run()
+
+ Run the multi-turn benchmark
+
+ Returns:
+     str: The HTML content
+
+ Source code in API/orchestrator/metrics/accuracy_benchmark.py
def multi_turn_benchmark_run(self):
+    """
+    Run the multi-turn benchmark
+    Returns:
+        str: The HTML content
+    """
+    logger.info(
+        f"Running multi-turn benchmark for cluster {self.benchmark_cluster}"
+    )
+    # run the benchmark
+    html_content = ""
+    if self.benchmark_cluster == "all":
+        for cluster_name in CLUSTERS.keys():
+            html_content += "<hr>"
+            html_content += self.process_multi_turn_benchmark(cluster_name)
+    else:
+        html_content += self.process_multi_turn_benchmark(self.benchmark_cluster)
+    return html_content
+
+
+ plot_distribution(df, title='') (staticmethod)
+
+ Plot the distribution of the accuracy metrics
+ Args:
+     df (pd.DataFrame): The dataframe
+     title (str): The title
+
+ Returns:
+     str: The plot in HTML
+
+ Source code in API/orchestrator/metrics/accuracy_benchmark.py
@staticmethod
+def plot_distribution(df: pd.DataFrame, title: str = "") -> str:
+    """
+    Plot the distribution of the accuracy metrics
+    Args:
+        df (pd.DataFrame): The dataframe
+        title (str): The title
+
+    Returns:
+        str: The plot in HTML
+    """
+    # plot the distribution for each column
+    # Calculate mean and max for each latency column
+
+    mean_accuracies = df[df.columns[1:]].mean()
+    max_accuracies = df[df.columns[1:]].max()
+    min_accuracies = df[df.columns[1:]].min()
+
+    # Create a Plotly figure
+    fig = go.Figure()
+    # Add min latencies to the figure
+    fig.add_trace(
+        go.Bar(x=min_accuracies.index, y=min_accuracies.values, name="Min Accuracy")
+    )
+    # Add mean latencies to the figure
+    fig.add_trace(
+        go.Bar(
+            x=mean_accuracies.index, y=mean_accuracies.values, name="Mean Accuracy"
+        )
+    )
+
+    # Add max latencies to the figure
+    fig.add_trace(
+        go.Bar(x=max_accuracies.index, y=max_accuracies.values, name="Max Accuracy")
+    )
+
+    # Customize the layout
+    fig.update_layout(
+        title={
+            "text": "Accuracy Distribution" + title,
+            "x": 0.5,
+            "xanchor": "center",
+            "yanchor": "top",
+        },
+        xaxis_title="Evaluation Metrics",
+        yaxis_title="Accuracies",
+        barmode="group",
+        margin=dict(l=10, r=10, b=0),
+    )
+
+    # Convert Plotly figure to HTML
+    plot_html = fig.to_html(full_html=False)
+    return plot_html
+
+
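+
+ A condensed sketch of the grouped-bar construction above, with synthetic scores (it only assumes pandas and plotly, both used in this module):
+
+import pandas as pd
+import plotly.graph_objects as go
+
+df = pd.DataFrame({"track_id": ["1", "2"], "wer": [0.1, 0.3], "speech2text_score": [4.0, 5.0]})
+metrics = df[df.columns[1:]]  # skip track_id, keep the metric columns
+
+fig = go.Figure()
+fig.add_trace(go.Bar(x=metrics.min().index, y=metrics.min().values, name="Min Accuracy"))
+fig.add_trace(go.Bar(x=metrics.mean().index, y=metrics.mean().values, name="Mean Accuracy"))
+fig.add_trace(go.Bar(x=metrics.max().index, y=metrics.max().values, name="Max Accuracy"))
+fig.update_layout(barmode="group", title="Accuracy Distribution (sketch)")
+html_snippet = fig.to_html(full_html=False)  # embeddable fragment, as in the report above
+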
+ plot_table(df, title='') (staticmethod)
+
+ Plot the table
+ Args:
+     df (pd.DataFrame): The dataframe
+     title (str): The title
+
+ Returns:
+     str: The plot in HTML
+
+ Source code in API/orchestrator/metrics/accuracy_benchmark.py
@staticmethod
+def plot_table(df: pd.DataFrame, title: str = "") -> str:
+    """
+    Plot the table
+    Args:
+        df (pd.DataFrame): The dataframe
+        title (str): The title
+
+    Returns:
+        str: The plot in HTML
+    """
+    colors = []
+    for col in df.columns:
+        col_colors = []
+        for val in df[col]:
+            if isinstance(val, float) or isinstance(val, int):
+                col_colors.append("lavender")
+            else:
+                if val == "missing":
+                    col_colors.append("lightcoral")
+                elif val == "started":
+                    col_colors.append("lightyellow")
+                elif val == "failed":
+                    col_colors.append("lightcoral")
+                elif val == "pending":
+                    col_colors.append("lightblue")
+                elif val == "incomplete":
+                    col_colors.append("lightgrey")
+                else:
+                    col_colors.append("lightgreen")
+        colors.append(col_colors)
+    # Create a Plotly table
+    fig = go.Figure(
+        data=[
+            go.Table(
+                header=dict(
+                    values=[
+                        (
+                            [f"<b>{c.upper()}</b>" for c in col]
+                            if isinstance(col, tuple)
+                            else f"<b>{col.upper()}</b>"
+                        )
+                        for col in df.columns
+                    ],
+                    fill_color="paleturquoise",
+                    align="left",
+                ),
+                cells=dict(
+                    values=[df[col] for col in df.columns],
+                    fill_color=colors,
+                    align="left",
+                ),
+            )
+        ]
+    )
+    fig.update_layout(
+        title={
+            "text": f"Accuracy: {title}",
+            "x": 0.5,
+            "xanchor": "center",
+            "yanchor": "top",
+        },
+        #     update margin to be 0
+        margin=dict(l=10, r=10, b=0),
+        # get the height to be whatever it requires
+        height=max((len(df) * 35), 400),
+    )
+    # Update layout for better appearance
+    desc_html = fig.to_html(full_html=False)
+    return desc_html
+
+
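+
+ The colour-coded status table above maps each cell value to a fill colour before building a go.Table; a pared-down sketch of the same idea (made-up rows, only a subset of the status colours):
+
+import plotly.graph_objects as go
+
+status_color = {"missing": "lightcoral", "failed": "lightcoral", "pending": "lightblue"}
+rows = {"track_id": ["001", "002"], "speech2text": ["completed", "pending"]}
+colors = [
+    [status_color.get(v, "lightgreen") if isinstance(v, str) else "lavender" for v in col]
+    for col in rows.values()
+]
+fig = go.Figure(
+    data=[
+        go.Table(
+            header=dict(values=list(rows)),
+            cells=dict(values=list(rows.values()), fill_color=colors),
+        )
+    ]
+)
+table_html = fig.to_html(full_html=False)
+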
+ process_cluster_benchmark(cluster_name, detailed=False)
+
+ Process the benchmark for a specific cluster
+
+ For each cluster, we will need to analyse the conversation model.
+ We also need to determine which other models require analysis, for example emotion_detection.
+ Args:
+     cluster_name (str): The cluster name
+     detailed (bool): The detailed flag
+
+ Returns:
+     str: The HTML content
+
+ Source code in API/orchestrator/metrics/accuracy_benchmark.py
def process_cluster_benchmark(
+    self, cluster_name: str, detailed: bool = False
+) -> str:
+    """
+    Process the benchmark for a specific cluster
+
+    For each cluster, we will need to analyse the conversation model
+    We also need to determine which other models require analysis, for example emotion_detection
+    Args:
+        cluster_name (str): The cluster name
+        detailed (bool): The detailed flag
+
+    Returns:
+        str: The HTML content
+    """
+    task_groups, required_tasks_count, tasks = extract_task_group(cluster_name)
+
+    required_annotation_task = self.extract_required_annotation_models(cluster_name)
+    logger.info(
+        f"Cluster: {cluster_name}, Required annotation tasks: {required_annotation_task}"
+    )
+    conversations = DataMultiModalConversation.objects.filter(
+        track_id__startswith=f"T-{cluster_name}-"
+    ).order_by("-created_at")
+
+    html_content = f"<h2>Cluster: {cluster_name}</h2>"
+    html_content += (
+        f"<p>Required tasks each group: {required_tasks_count} | "
+        f"Annotation task groups: {len(conversations)}</p>"
+    )
+
+    # the emotion and other context results also will be pulled from this one
+    # then we will according to this to load the annotation results
+    # track id and annotation => flatten the results
+    annotations = []
+    annotation_expected_keys = MultiModalAnnotationForm.declared_fields.keys()
+    annotation_pending_default = {
+        key: "pending" for key in annotation_expected_keys
+    }
+
+    for conversation in conversations:
+        conversation_annotation = conversation.annotations
+        annotated = False
+        for user_id, annotation in conversation_annotation.items():
+            annotations.append(
+                {
+                    "track_id": conversation.track_id,
+                    "user_id": user_id,
+                    "predict_text": conversation.text.text,
+                    **annotation_pending_default,
+                    **annotation,
+                }
+            )
+            annotated = True
+        if not annotated:
+            annotations.append(
+                {
+                    "track_id": conversation.track_id,
+                    "user_id": "missing",
+                    "predict_text": "",
+                    **annotation_pending_default,
+                }
+            )
+
+    conversation_annotation_df = pd.DataFrame(annotations)
+    if len(conversation_annotation_df) == 0:
+        return html_content + "<p>No conversation annotation found</p>"
+    # transform the track_id to be the last part
+    conversation_annotation_df["track_id"] = (
+        conversation_annotation_df["track_id"].str.split("-").str[-1]
+    )
+    # replace all the column names, remove the annotation prefix
+    conversation_annotation_df.columns = [
+        col.replace("annotation_", "") for col in conversation_annotation_df.columns
+    ]
+    # add CER and WER
+    conversation_annotation_df = self.calculate_speech2text_accuracy(
+        conversation_annotation_df
+    )
+
+    if detailed:
+        # then we will present them into multiple tables: speech2text, text_generation, text2speech, overall
+        if "speech2text" in required_annotation_task:
+            speech2text_df = conversation_annotation_df[
+                [
+                    "track_id",
+                    "user_id",
+                    "predict_text",
+                    "speech2text",
+                    "wer",
+                    "cer",
+                    "speech2text_score",
+                ]
+            ].copy(deep=True)
+            html_content += self.plot_table(speech2text_df, "Speech2Text")
+        if "text_generation" in required_annotation_task:
+            text_generation_df = conversation_annotation_df[
+                ["track_id", "user_id", "text_generation", "text_generation_score"]
+            ].copy(deep=True)
+            html_content += self.plot_table(text_generation_df, "Text Generation")
+        if "text2speech" in required_annotation_task:
+            text2speech_df = conversation_annotation_df[
+                ["track_id", "user_id", "text2speech_score"]
+            ].copy(deep=True)
+            html_content += self.plot_table(text2speech_df, "Text2Speech")
+
+        overall_conversation_df = conversation_annotation_df[
+            ["track_id", "user_id", "overall_comment", "overall_score"]
+        ].copy(deep=True)
+        html_content += self.plot_table(
+            overall_conversation_df, "Overall Conversation Quality"
+        )
+    else:
+        #
+        # then we will try to calculate the overall accuracy for each annotation task
+        conversation_annotation_df = self.annotation_average(
+            df=conversation_annotation_df
+        )
+        if "speech2text" in required_annotation_task:
+            desc_df = self.summary_df(
+                conversation_annotation_df[
+                    ["track_id", "wer", "cer", "speech2text_score"]
+                ].copy(deep=True)
+            )
+            html_content += self.plot_table(desc_df, "Speech2Text Overall Quality")
+            html_content += self.plot_distribution(
+                conversation_annotation_df[
+                    ["track_id", "wer", "cer", "speech2text_score"]
+                ].copy(deep=True),
+                "Speech2Text",
+            )
+        if "text_generation" in required_annotation_task:
+            desc_df = self.summary_df(
+                conversation_annotation_df[
+                    ["track_id", "text_generation_score"]
+                ].copy(deep=True)
+            )
+            html_content += self.plot_table(
+                desc_df, "Text Generation Overall Quality"
+            )
+            html_content += self.plot_distribution(
+                conversation_annotation_df[
+                    ["track_id", "text_generation_score"]
+                ].copy(deep=True),
+                "Text Generation",
+            )
+
+        if "text2speech" in required_annotation_task:
+            desc_df = self.summary_df(
+                conversation_annotation_df[["track_id", "text2speech_score"]].copy(
+                    deep=True
+                )
+            )
+            html_content += self.plot_table(desc_df, "Text2Speech Overall Quality")
+            html_content += self.plot_distribution(
+                conversation_annotation_df[["track_id", "text2speech_score"]].copy(
+                    deep=True
+                ),
+                "Text2Speech",
+            )
+
+    # summary the emotion detection task
+    if "emotion_detection" in required_annotation_task:
+        # load the emotion detection results
+        emotion_detection_results = ContextEmotionDetection.objects.filter(
+            multi_modal_conversation__in=conversations
+        ).order_by("-created_at")
+        if len(emotion_detection_results) == 0:
+            return html_content + "<h4>No emotion detection results found</h4>"
+
+        emotion_detection_expected_keys = (
+            MultiModalFKEmotionDetectionAnnotationForm.declared_fields.keys()
+        )
+        emotion_detection_pending_default = {
+            key: "pending" for key in emotion_detection_expected_keys
+        }
+        emotion_detection_annotations = []
+        for emotion_detection in emotion_detection_results:
+            emotion_detection_annotation = emotion_detection.annotations
+            annotated = False
+            for user_id, annotation in emotion_detection_annotation.items():
+                emotion_detection_annotations.append(
+                    {
+                        "track_id": emotion_detection.multi_modal_conversation.track_id,
+                        "user_id": user_id,
+                        **emotion_detection_pending_default,
+                        **annotation,
+                    }
+                )
+                annotated = True
+            if not annotated:
+                emotion_detection_annotations.append(
+                    {
+                        "track_id": emotion_detection.multi_modal_conversation.track_id,
+                        "user_id": "missing",
+                        **emotion_detection_pending_default,
+                    }
+                )
+
+        emotion_detection_df = pd.DataFrame(emotion_detection_annotations)
+        logger.info(emotion_detection_df)
+        emotion_detection_df["track_id"] = (
+            emotion_detection_df["track_id"].str.split("-").str[-1]
+        )
+        emotion_detection_df.columns = [
+            col.replace("annotation_", "") for col in emotion_detection_df.columns
+        ]
+        if detailed:
+            html_content += self.plot_table(
+                emotion_detection_df, "Emotion Detection"
+            )
+
+        else:
+            emotion_detection_df = self.annotation_average(emotion_detection_df)
+            desc_df = self.summary_df(emotion_detection_df)
+            # logger.info(desc_df)
+            html_content += self.plot_table(desc_df, "Emotion Detection")
+            html_content += self.plot_distribution(
+                emotion_detection_df, "Emotion Detection"
+            )
+    return html_content
+
+
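+
+ The annotation flattening above merges a pending-default dict with each user's answers, so unanswered fields stay marked as pending; a minimal illustration with hypothetical field names:
+
+pending_default = {"speech2text_score": "pending", "overall_score": "pending"}
+user_annotation = {"speech2text_score": 4}
+
+row = {"track_id": "T-demo-001", "user_id": "u1", **pending_default, **user_annotation}
+print(row)  # overall_score stays 'pending', speech2text_score becomes 4
+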
+ process_multi_turn_benchmark(cluster_name)
+
+ Process the multi-turn benchmark
+
+ First we will need to get all tags with this cluster name, and grab the last conversation within each tag
+ Args:
+     cluster_name (str): The cluster name
+
+ Returns:
+     str: The HTML content
+
+ Source code in API/orchestrator/metrics/accuracy_benchmark.py
def process_multi_turn_benchmark(self, cluster_name: str) -> str:
+    """
+    Process the multi-turn benchmark
+
+    First we will need to get all tags with this cluster name, and grab the last conversation within each tag
+    Args:
+        cluster_name (str): The cluster name
+    Returns:
+        str: The HTML content
+    """
+    conversations = DataMultiModalConversation.objects.filter(
+        track_id__startswith=f"T-{cluster_name}-"
+    )
+
+    # grab all tags
+
+    tags = []
+    for conversation in conversations:
+        for tag in conversation.tags.all():
+            tags.append(tag.name)
+
+    tags = list(set(tags))
+
+    tag_last_conversations = []
+    for tag in tags:
+        last_conversation = (
+            conversations.filter(tags__name=tag).order_by("-created_at").first()
+        )
+        tag_last_conversations.append(last_conversation)
+
+    html_content = f"<h2>Cluster: {cluster_name}</h2>"
+    html_content += (
+        f"<p>Multi-Turn Conversation Count: {len(tag_last_conversations)}</p>"
+    )
+    # then we will need to analyse the conversation model
+    multi_turn_annotations = []
+    # get all possible keys from the annotation form
+    multi_turn_annotations_expected_keys = []
+    for conversation in tag_last_conversations:
+        conversation_annotation = conversation.multi_turns_annotations
+        for user_id, annotation in conversation_annotation.items():
+            multi_turn_annotations_expected_keys.extend(annotation.keys())
+    multi_turn_annotations_expected_keys = list(
+        set(multi_turn_annotations_expected_keys)
+    )
+
+    if "multi_turn_annotation_overall" not in multi_turn_annotations_expected_keys:
+        multi_turn_annotations_expected_keys.append("multi_turn_annotation_overall")
+    if (
+        "multi_turn_annotation_overall_comment"
+        not in multi_turn_annotations_expected_keys
+    ):
+        multi_turn_annotations_expected_keys.append(
+            "multi_turn_annotation_overall_comment"
+        )
+
+    multi_turn_annotations_pending_default = {
+        key: "pending" for key in multi_turn_annotations_expected_keys
+    }
+
+    for conversation in tag_last_conversations:
+        conversation_annotation = conversation.multi_turns_annotations
+        annotated = False
+        for user_id, annotation in conversation_annotation.items():
+            multi_turn_annotations.append(
+                {
+                    "track_id": conversation.track_id,
+                    "user_id": user_id,
+                    **multi_turn_annotations_pending_default,
+                    **annotation,
+                }
+            )
+            annotated = True
+        if not annotated:
+            multi_turn_annotations.append(
+                {
+                    "track_id": conversation.track_id,
+                    "user_id": "missing",
+                    **multi_turn_annotations_pending_default,
+                }
+            )
+
+    multi_turn_annotation_df = pd.DataFrame(multi_turn_annotations)
+
+    if len(multi_turn_annotation_df) == 0:
+        return html_content + "<p>No multi-turn conversation annotation found</p>"
+    # transform the track_id to be the last part
+    multi_turn_annotation_df["track_id"] = (
+        multi_turn_annotation_df["track_id"].str.split("-").str[-1]
+    )
+    # replace all the column names, remove the annotation prefix
+    multi_turn_annotation_df.columns = [
+        col.replace("multi_turn_annotation_", "")
+        for col in multi_turn_annotation_df.columns
+    ]
+
+    html_content += self.plot_table(
+        multi_turn_annotation_df, "Multi-Turn Conversation"
+    )
+    return html_content
+
+
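+
+ The per-tag selection above keeps only the most recent conversation for each tag; the same idea in a standalone pandas sketch with made-up timestamps:
+
+import pandas as pd
+
+conversations = pd.DataFrame(
+    {
+        "tag": ["session-a", "session-a", "session-b"],
+        "track_id": ["T-x-001", "T-x-002", "T-x-003"],
+        "created_at": pd.to_datetime(["2024-07-01", "2024-07-02", "2024-07-01"]),
+    }
+)
+latest = conversations.sort_values("created_at").groupby("tag").tail(1)
+print(latest)  # one row per tag: T-x-002 for session-a, T-x-003 for session-b
+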
+ summary_df(df) (staticmethod)
+
+ Summarize the given dataframe
+
+ Parameters:
+     df (pd.DataFrame): The dataframe (required)
+
+ Returns:
+     pd.DataFrame: The summary statistics dataframe
+
+ Source code in API/orchestrator/metrics/accuracy_benchmark.py
@staticmethod
+def summary_df(df: pd.DataFrame) -> pd.DataFrame:
+    """
+    Summarize the given dataframe
+
+    Args:
+        df (pd.DataFrame): The dataframe
+
+    Returns:
+        pd.DataFrame: The summary statistics dataframe
+    """
+    # for the same track_id, aggregate the results into one, and use the mean as the final result
+    # df = df.apply(pd.to_numeric, errors='coerce')
+
+    # Group by 'track_id' and calculate the mean for each group
+    # df = df.groupby("track_id").mean().reset_index()
+    desc_df = df.describe().transpose()
+    desc_df = desc_df.reset_index()
+    desc_df.rename(columns={"index": "metric"}, inplace=True)
+    desc_df = desc_df.round(4)
+    return desc_df
+
+
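+
+ For reference, a tiny sketch of what the describe/transpose step above produces, using made-up scores:
+
+import pandas as pd
+
+scores = pd.DataFrame({"wer": [0.1, 0.2], "cer": [0.05, 0.1]})
+desc = scores.describe().transpose().reset_index().rename(columns={"index": "metric"})
+print(desc.round(4))  # one row per metric: count, mean, std, min, quartiles, max
+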
+
\ No newline at end of file
diff --git a/Sources/API/orchestrator/metrics/latency_benchmark/index.html b/Sources/API/orchestrator/metrics/latency_benchmark/index.html
new file mode 100644
index 00000000..8f8421ff
--- /dev/null
+++ b/Sources/API/orchestrator/metrics/latency_benchmark/index.html
@@ -0,0 +1,8204 @@
+ LatencyBenchmark - OpenOmni Framework by AI4WA
LatencyBenchmark

+ LatencyBenchmark
+
+ For each component, we will generally have three values:
+ - model_latency: The time taken by the model to process the data
+ - transfer_latency: The time taken to transfer the data to the model
+ - overall_latency: The time taken by the model to process the data and transfer the data to the model
+
+ The whole pipeline latency will be the sum of
+ - the durations between each component's start and end timestamps
+
+ Another way to output the performance is the Timeline
+ - start will be 0
+ - and average relative time to 0 for each important time point, plot them in the timeline
+
+ Source code in API/orchestrator/metrics/latency_benchmark.py
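+
+ A minimal sketch of the latency decomposition described above, with illustrative keys and numbers (datetime.fromisoformat stands in for the str_to_datetime helper used in the source below):
+
+from datetime import datetime
+
+latency_profile = {
+    "model_stt": 0.82,        # hypothetical: seconds spent inside a model
+    "transfer_upload": 0.10,  # hypothetical: seconds spent moving data
+    "ts_start_task": "2024-07-01T14:58:35.000000",
+    "ts_end_task": "2024-07-01T14:58:36.419352",
+}
+model_latency = sum(v for k, v in latency_profile.items() if k.startswith("model"))
+transfer_latency = sum(v for k, v in latency_profile.items() if k.startswith("transfer"))
+overall_latency = (
+    datetime.fromisoformat(latency_profile["ts_end_task"])
+    - datetime.fromisoformat(latency_profile["ts_start_task"])
+).total_seconds()
+print(model_latency, transfer_latency, round(overall_latency, 4))  # 0.82 0.1 1.4194
+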
class LatencyBenchmark:
+    """
+    For each component, we will generally have three values:
+    - model_latency: The time taken by the model to process the data
+    - transfer_latency: The time taken to transfer the data to the model
+    - overall_latency: The time taken by the model to process the data and transfer the data to the model
+
+    The whole pipeline latency will be the sum of
+    - all component start end end ts
+
+    Another way to output the performance is the Timeline
+    - start will be 0
+    - and average relative time to 0 for each important time point, plot them in the timeline
+    """
+
+    def __init__(self, benchmark_cluster: str = CLUSTER_Q_ETE_CONVERSATION_NAME):
+        """
+        Initialize the benchmark
+        Args:
+            benchmark_cluster (str): The benchmark cluster
+        """
+        # if it is a specific name, gather this metric, otherwise, report all existing cluster
+        self.benchmark_cluster = benchmark_cluster
+
+    def run(self):
+        """
+        Run the benchmark
+        """
+        html_content = ""
+        if self.benchmark_cluster == "all":
+            for cluster_name in CLUSTERS.keys():
+                # add a divider
+                html_content += "<hr>"
+                html_content += self.process_cluster(cluster_name)
+        else:
+            if self.benchmark_cluster not in CLUSTERS:
+                raise ValueError(f"Cluster {self.benchmark_cluster} not found")
+            html_content += "<hr>"
+            html_content += self.process_cluster(self.benchmark_cluster)
+        return html_content
+
+    def run_detail(self) -> str:
+        html_content = ""
+        if self.benchmark_cluster == "all":
+            for cluster_name in CLUSTERS.keys():
+                # add a divider
+                html_content += "<hr>"
+                html_content += self.process_cluster_detail(cluster_name)
+        else:
+            if self.benchmark_cluster not in CLUSTERS:
+                raise ValueError(f"Cluster {self.benchmark_cluster} not found")
+            html_content += "<hr>"
+            html_content += self.process_cluster_detail(self.benchmark_cluster)
+        return html_content
+
+    def process_cluster(self, cluster_name: str):
+        """
+        Process the cluster
+        Args:
+            cluster_name (str): The cluster name
+        """
+        task_groups, required_tasks_count, tasks = extract_task_group(cluster_name)
+        general_desc = f"<h2>Cluster: {cluster_name}</h2>"
+        general_desc += f"<p>Required tasks: {required_tasks_count} | Total tasks groups: {len(task_groups)}</p>"
+        # loop through the task groups, if the success task is not == required_tasks_count, then we will skip
+        success_pipeline = 0
+        cluster_latency = []
+        for track_id, task_group in task_groups.items():
+            success_tasks = [
+                task for task in task_group if task.result_status == "completed"
+            ]
+            if len(success_tasks) != required_tasks_count:
+                # the pipeline is not completed, so we will skip
+                continue
+            success_pipeline += 1
+            cluster_latency.append(self.process_task_group(task_group))
+
+        logger.info(
+            f"""
+                Cluster: {cluster_name}, Success Ratio: {success_pipeline}/{len(task_groups)}
+                Required Components: {required_tasks_count}, Total tasks: {len(tasks)}
+            """
+        )
+
+        general_title = f"Cluster: <b>{cluster_name}</b>, Completed Ratio: {success_pipeline}/{len(task_groups)}"
+        # flatten the cluster_latency
+        result_df = pd.DataFrame(cluster_latency)
+        # get the column split with _ from right, and left element is the component name
+
+        if len(result_df) != 0:
+            logger.debug(result_df.describe())
+            # result_df.to_csv(settings.LOG_DIR / f"{cluster_name}_benchmark.csv")
+            # to html and return it
+            logger.debug(result_df.describe())
+            desc = result_df.describe().transpose()
+            desc = desc.round(4)
+
+            # add another column
+            # Extract the latency type from the index and add it as a new column
+            desc["latency_type"] = desc.index.str.rsplit("_", n=2).str[1]
+            # then update the index to two columns, first will be component
+            desc.index = desc.index.str.rsplit("_", n=2, expand=True).get_level_values(
+                0
+            )
+            # reset index, get the index to be the column component
+            desc = desc.reset_index()
+            # rename the index to be component
+            desc = desc.rename(columns={"index": "component"})
+            desc_html = self.plot_table(desc, title=f" ({general_title})")
+            plot_html = self.plot_distribution(result_df, title=f" ({general_title})")
+
+            return general_desc + desc_html + plot_html
+        return general_desc
+
+    def process_cluster_detail(self, cluster_name: str) -> str:
+        """
+        Process the cluster in detail
+        Even if the track is not finished, we will still plot it and stop status
+        Args:
+            cluster_name (str): The cluster name
+
+        Returns:
+            str: The HTML content
+        """
+        task_groups, required_tasks_count, tasks = extract_task_group(cluster_name)
+        general_desc = f"<h2>Cluster: {cluster_name}</h2>"
+        general_desc += f"<p>Required tasks: {required_tasks_count} | Total tasks groups: {len(task_groups)}</p>"
+        # loop through the task groups, if the success task is not == required_tasks_count, then we will skip
+        success_pipeline = 0
+        cluster_latency = []
+        cluster_ts_latency = []
+        cluster_tp_latency = []
+        for track_id, task_group in task_groups.items():
+            success_tasks = [
+                task for task in task_group if task.result_status == "completed"
+            ]
+            if len(success_tasks) == required_tasks_count:
+                # the pipeline is not completed, so we will skip
+                success_pipeline += 1
+            cluster_latency.append(self.process_task_group_detail(task_group))
+            cluster_ts_latency.append(
+                self.process_task_group_detail_timeline(task_group)
+            )
+            cluster_tp_latency.append(
+                self.process_task_group_detail_timeline(task_group, timeline=True)
+            )
+        general_title = f"Cluster: <b>{cluster_name}</b>, Completed Ratio: {success_pipeline}/{len(task_groups)}"
+        result_df = pd.DataFrame(cluster_latency)
+        if len(result_df) == 0:
+            return general_desc
+
+        # only keep the last element in the track_id
+        result_df["track_id"] = result_df["track_id"].str.split("-").str[-1]
+        # get result into multiple level column, which will split current column into multiple level column name
+        # Split the column names into three parts, but only keep the first two
+        split_columns = result_df.columns.str.rsplit("_", n=2, expand=True)
+
+        # we only need the first two level, so we will get the first two level
+        result_df.columns = [
+            split_columns.get_level_values(0),
+            split_columns.get_level_values(1),
+        ]
+        # sort the column
+        track_tasks_html = self.plot_table(result_df, title=f" ({general_title})")
+
+        # cluster ts latency
+        result_ts_df = pd.DataFrame(cluster_ts_latency)
+        # result_ts_df.to_csv(settings.LOG_DIR / f"{cluster_name}_ts_benchmark.csv")
+        if len(result_ts_df) == 0:
+            return track_tasks_html
+        # we will plot a bar
+        ts_stacked_html = self.plot_stacked_timeline(result_ts_df, title=general_title)
+
+        # grab the time point latency, and try to draw time point html
+        result_tp_df = pd.DataFrame(cluster_tp_latency)
+        # result_tp_df.to_csv(settings.LOG_DIR / f"{cluster_name}_tp_benchmark.csv")
+        ts_timepoint_html = self.plot_timestamp_timeline_depth(
+            result_tp_df, title=general_title
+        )
+        return general_desc + track_tasks_html + ts_stacked_html + ts_timepoint_html
+
+    @staticmethod
+    def process_task_group(task_track: List[Task]):
+        """
+        This will process each component, and then extract the transfer and model latency total
+
+        Args:
+            task_track (List[Task]): The task track
+
+        Returns:
+            dict: The benchmark result
+        """
+        result = {
+            "track_id": task_track[0].track_id,
+        }
+        task_names = get_task_names_order(result["track_id"])
+        for task in task_track:
+            latency_profile = task.result_json.get("latency_profile", {})
+            # NOTE: this will require client side do not log overlap durations
+            model_latency = 0
+            transfer_latency = 0
+            logger.debug(latency_profile)
+            task_start_time = None
+            task_end_time = None
+            for key, value in latency_profile.items():
+                if key.startswith("model"):
+                    model_latency += float(value)
+                if key.startswith("transfer"):
+                    transfer_latency += float(value)
+                if key.startswith("ts"):
+                    if key == "ts_start_task":
+                        task_start_time = value
+                    if key == "ts_end_task":
+                        task_end_time = value
+            result[f"{task.task_name}_model_latency"] = model_latency
+            result[f"{task.task_name}_transfer_latency"] = transfer_latency
+            # look for the ts_start_task and ts_end_task, and the overall_latency should be that value
+            # process time into datetime object
+            # ts_end_trigger_emotion_model 2024-07-01T14:58:36.419352
+            if task_start_time and task_end_time:
+                task_start_time_dt = str_to_datetime(task_start_time)
+                task_end_time_dt = str_to_datetime(task_end_time)
+                result[f"{task.task_name}_overall_latency"] = (  # noqa
+                    task_end_time_dt - task_start_time_dt
+                ).total_seconds()
+
+            else:
+                logger.error(f"Task {task.task_name} does not have start and end time")
+                result[f"{task.task_name}_overall_latency"] = (
+                    model_latency + transfer_latency
+                )
+        # total_latency should be the sum of all the overall_latency
+        total_latency = 0
+        for key, value in result.items():
+            if key.endswith("overall_latency"):
+                total_latency += value
+        result["total_latency"] = total_latency
+        # loop all value, get it to decimal 4
+        for key, value in result.items():
+            if isinstance(value, float):
+                result[key] = round(value, 4)
+
+        ordered_result = {
+            "track_id": result["track_id"],
+        }
+        for task_name in task_names:
+            ordered_result[task_name + "_model_latency"] = result[
+                task_name + "_model_latency"
+            ]
+            ordered_result[task_name + "_transfer_latency"] = result[
+                task_name + "_transfer_latency"
+            ]
+            ordered_result[task_name + "_overall_latency"] = result[
+                task_name + "_overall_latency"
+            ]
+        ordered_result["total_latency"] = result["total_latency"]
+        return ordered_result
+
+    @staticmethod
+    def process_task_group_detail(task_track: List[Task]):
+        """
+        This will process each component, and then extract the transfer and model latency total
+
+        Args:
+            task_track (List[Task]): The task track
+
+        Returns:
+            dict: The benchmark result
+        """
+        result = {
+            "track_id": task_track[0].track_id,
+        }
+        task_names = get_task_names_order(result["track_id"])
+        for task in task_track:
+            if task.result_status != "completed":
+                result[f"{task.task_name}_model_latency"] = task.result_status
+                result[f"{task.task_name}_transfer_latency"] = task.result_status
+                result[f"{task.task_name}_overall_latency"] = task.result_status
+                continue
+            latency_profile = task.result_json.get("latency_profile", {})
+            # NOTE: this will require client side do not log overlap durations
+            model_latency = 0
+            transfer_latency = 0
+            logger.debug(latency_profile)
+            task_start_time = None
+            task_end_time = None
+            for key, value in latency_profile.items():
+                if key.startswith("model"):
+                    model_latency += float(value)
+                if key.startswith("transfer"):
+                    transfer_latency += float(value)
+                if key.startswith("ts"):
+                    if key == "ts_start_task":
+                        task_start_time = value
+                    if key == "ts_end_task":
+                        task_end_time = value
+            result[f"{task.task_name}_model_latency"] = model_latency
+            result[f"{task.task_name}_transfer_latency"] = transfer_latency
+            # look for the ts_start_task and ts_end_task, and the overall_latency should be that value
+            # process time into datetime object
+            # ts_end_trigger_emotion_model 2024-07-01T14:58:36.419352
+            if task_start_time and task_end_time:
+                task_start_time_dt = str_to_datetime(task_start_time)
+                task_end_time_dt = str_to_datetime(task_end_time)
+                result[f"{task.task_name}_overall_latency"] = (  # noqa
+                    task_end_time_dt - task_start_time_dt
+                ).total_seconds()
+
+            else:
+                logger.error(f"Task {task.task_name} does not have start and end time")
+                result[f"{task.task_name}_overall_latency"] = (
+                    model_latency + transfer_latency
+                )
+
+        # sort the key to be the same as the cluster order, also if missed, fill it with missing
+        for task_name in task_names:
+            if f"{task_name}_overall_latency" not in result:
+                result[task_name + "_model_latency"] = "missing"
+                result[task_name + "_transfer_latency"] = "missing"
+                result[task_name + "_overall_latency"] = "missing"
+
+        # total_latency should be the sum of all the overall_latency
+        total_latency = 0
+        for key, value in result.items():
+            if key.endswith("overall_latency") and isinstance(value, float):
+                total_latency += value
+            elif key.endswith("overall_latency") and not isinstance(value, float):
+                total_latency = "incomplete"
+                break
+        result["total_latency"] = total_latency
+        # loop all value, get it to decimal 4
+        for key, value in result.items():
+            if isinstance(value, float):
+                result[key] = round(value, 4)
+
+        ordered_result = {
+            "track_id": result["track_id"],
+        }
+        for task_name in task_names:
+            ordered_result[task_name + "_model_latency"] = result[
+                task_name + "_model_latency"
+            ]
+            ordered_result[task_name + "_transfer_latency"] = result[
+                task_name + "_transfer_latency"
+            ]
+            ordered_result[task_name + "_overall_latency"] = result[
+                task_name + "_overall_latency"
+            ]
+
+        ordered_result["total_latency"] = result["total_latency"]
+        return ordered_result
+
+    @staticmethod
+    def process_task_group_detail_timeline(
+        task_track: List[Task], timeline: bool = False
+    ):
+        """
+        Based on the result_json => latency_profile
+        We will gather the time point for each, and then change to the relative second value compared to start point
+
+        If timeline is True, we will only grab the timestamp information.
+        Otherwise, we will calculate the relative time to the start point
+
+        In the end, we will return the gathered time points for each task
+        Args:
+            task_track (List[Task]): The task track
+            timeline (bool): If we want to plot the timeline
+
+        Returns:
+            dict: The track_id and the time points for each task
+        """
+        result = {
+            "track_id": task_track[0].track_id,
+        }
+
+        task_names = get_task_names_order(result["track_id"])
+
+        task_results = {}
+        for task in task_track:
+            if task.result_status != "completed":
+                continue
+            latency_profile = task.result_json.get("latency_profile", {})
+            task_result = {}
+            for key, value in latency_profile.items():
+                if key.startswith("ts"):
+                    task_result[key] = str_to_datetime(value)
+
+            if timeline is False:
+                # sort out the whole task_result based on time timestamp
+                # and then calculate the relative time to the previous component
+                sorted_task_result = dict(
+                    sorted(task_result.items(), key=lambda item: item[1])
+                )
+                previous_time = None
+                task_relative_time = {}
+                for key, value in sorted_task_result.items():
+                    if previous_time is None:
+                        task_relative_time[key] = 0
+                    else:
+                        task_relative_time[key] = (
+                            value - previous_time
+                        ).total_seconds()
+                    previous_time = value
+                task_results[task.task_name] = task_relative_time
+            else:
+                task_results[task.task_name] = task_result
+
+        # sort the key to be the same as the cluster order, calculate the value to add up the previous component
+        first_start_task = None
+        for task_name in task_names:
+            if task_name not in task_results:
+                break
+            for key, value in task_results[task_name].items():
+                new_key = f"{task_name}_{key.split('_', 1)[1]}"
+                if key == "ts_start_task":
+                    if first_start_task is None:
+                        first_start_task = value
+                    else:
+                        continue
+                if new_key not in result:
+                    result[new_key] = value
+
+        return result
+
+    @staticmethod
+    def plot_table(df: pd.DataFrame, title: str = "") -> str:
+        """
+        Plot the table
+        Args:
+            df (pd.DataFrame): The dataframe
+            title (str): The title
+
+        Returns:
+            str: The plot in HTML
+        """
+        colors = []
+        for col in df.columns:
+            col_colors = []
+            for val in df[col]:
+                if isinstance(val, float) or isinstance(val, int):
+                    col_colors.append("lavender")
+                else:
+                    if val == "missing":
+                        col_colors.append("lightcoral")
+                    elif val == "started":
+                        col_colors.append("lightyellow")
+                    elif val == "failed":
+                        col_colors.append("lightcoral")
+                    elif val == "pending":
+                        col_colors.append("lightblue")
+                    elif val == "incomplete":
+                        col_colors.append("lightgrey")
+                    else:
+                        col_colors.append("lightgreen")
+            colors.append(col_colors)
+        # Create a Plotly table
+        fig = go.Figure(
+            data=[
+                go.Table(
+                    header=dict(
+                        values=[
+                            (
+                                [f"<b>{c.upper()}</b>" for c in col]
+                                if isinstance(col, tuple)
+                                else f"<b>{col.upper()}</b>"
+                            )
+                            for col in df.columns
+                        ],
+                        fill_color="paleturquoise",
+                        align="left",
+                    ),
+                    cells=dict(
+                        values=[df[col] for col in df.columns],
+                        fill_color=colors,
+                        align="left",
+                    ),
+                )
+            ]
+        )
+        fig.update_layout(
+            title={
+                "text": f"Latency Summary: {title}",
+                "x": 0.5,
+                "xanchor": "center",
+                "yanchor": "top",
+            },
+            #     update margin to be 0
+            margin=dict(l=10, r=10, b=0),
+            # get the height to be whatever it requires
+            height=max((len(df) * 35), 300),
+        )
+        # Update layout for better appearance
+        desc_html = fig.to_html(full_html=False)
+        return desc_html
+
+    @staticmethod
+    def plot_distribution(df: pd.DataFrame, title: str = "") -> str:
+        """
+        Plot the distribution of the latency
+        Args:
+            df (pd.DataFrame): The dataframe
+            title (str): The title
+
+        Returns:
+            str: The plot in HTML
+        """
+        # plot the distribution for each column
+        # Calculate mean and max for each latency column
+        mean_latencies = df[df.columns[1:]].mean()
+        max_latencies = df[df.columns[1:]].max()
+        min_latencies = df[df.columns[1:]].min()
+
+        # Create a Plotly figure
+        fig = go.Figure()
+        # Add min latencies to the figure
+        fig.add_trace(
+            go.Bar(x=min_latencies.index, y=min_latencies.values, name="Min Latency")
+        )
+        # Add mean latencies to the figure
+        fig.add_trace(
+            go.Bar(x=mean_latencies.index, y=mean_latencies.values, name="Mean Latency")
+        )
+
+        # Add max latencies to the figure
+        fig.add_trace(
+            go.Bar(x=max_latencies.index, y=max_latencies.values, name="Max Latency")
+        )
+
+        # Customize the layout
+        fig.update_layout(
+            title={
+                "text": "Latency Distribution" + title,
+                "x": 0.5,
+                "xanchor": "center",
+                "yanchor": "top",
+            },
+            xaxis_title="Component and Latency",
+            yaxis_title="Latency (s)",
+            barmode="group",
+            margin=dict(l=10, r=10, b=0),
+        )
+
+        # Convert Plotly figure to HTML
+        plot_html = fig.to_html(full_html=False)
+        return plot_html
+
+    @staticmethod
+    def plot_stacked_timeline(df: pd.DataFrame, title: str) -> str:
+        """
+        Plot the stacked timeline
+        Args:
+            df (pd.DataFrame): The dataframe
+            title (str): The title
+
+        Returns:
+            str: The plot in HTML
+        """
+        # Create a Plotly figure
+        fig = go.Figure()
+        # get the track id to be the stacked one
+        df["track_id"] = df["track_id"].str.split("-").str[-1]
+        # Add a trace for each component
+        for col in df.columns[1:]:
+            fig.add_trace(
+                go.Bar(
+                    y=df["track_id"],
+                    x=df[col],
+                    name=col,
+                    orientation="h",
+                    hovertemplate="%{x}<br>%{fullData.name}<extra></extra>",
+                )
+            )
+
+        # Customize the layout
+        fig.update_layout(
+            title={
+                "text": f"Time Interval in Seconds ({title})",
+                "x": 0.5,
+                "xanchor": "center",
+                "yanchor": "top",
+            },
+            xaxis_title="Relative in Seconds to Start Time",
+            yaxis_title="Track ID",
+            barmode="stack",
+            height=max((len(df) * 35), 300),
+        )
+
+        # Convert Plotly figure to HTML
+        plot_html = fig.to_html(full_html=False)
+        return plot_html
+
+    @staticmethod
+    def plot_timestamp_timeline_depth(df: pd.DataFrame, title: str) -> str:
+        """
+        Plot the timestamp timeline
+        Args:
+            df (pd.DataFrame): The dataframe
+            title (str): The title
+
+        Returns:
+            str: The plot in HTML
+        """
+        fig = go.Figure()
+        y_values = list(range(len(df)))
+        shapes = []
+        for y_value in y_values:
+            shapes.append(
+                dict(
+                    type="line",
+                    xref="paper",
+                    x0=0,
+                    x1=1,
+                    yref="y",
+                    y0=y_value,
+                    y1=y_value,
+                    line=dict(color="grey", width=1, dash="dot"),
+                )
+            )
+        y_labels = []
+
+        legend_added = {}
+        # Use Plotly's qualitative color sequence 'Dark24' to generate a spectrum of colors
+        colors = pcolors.qualitative.Dark24
+
+        # Dynamically generate a color map for each column
+        column_colors = {
+            col: colors[i % len(colors)] for i, col in enumerate(df.columns[1:])
+        }
+        for i, row in df.iterrows():
+            y_value = y_values[i]
+            y_labels.append(row["track_id"].split("-")[-1])
+            for col in df.columns[1:]:
+                if not pd.isna(row[col]):
+                    show_legend = False
+                    if col not in legend_added:
+                        show_legend = True
+                        legend_added[col] = True
+                    fig.add_trace(
+                        go.Scatter(
+                            x=[row[col]],
+                            y=[y_value],
+                            mode="markers",
+                            marker=dict(size=10, color=column_colors[col]),
+                            name=f"{col}",
+                            hovertemplate="%{x}<br>%{fullData.name}<extra></extra>",
+                            showlegend=show_legend,
+                        )
+                    )
+        # Customize the layout
+        fig.update_layout(
+            title={
+                "text": f"Timeline of Events ({title})",
+                "x": 0.5,
+                "xanchor": "center",
+                "yanchor": "top",
+            },
+            xaxis_title="Time",
+            yaxis=dict(
+                showline=False,
+                showgrid=True,
+                zeroline=False,
+                tickvals=y_values,
+                ticktext=y_labels,
+                title="Track ID",
+            ),
+            showlegend=True,
+            shapes=shapes,
+            height=max((len(df) * 35), 300),
+        )
+        # Convert Plotly figure to HTML
+        plot_html = fig.to_html(full_html=False)
+        return plot_html
+
+    @staticmethod
+    def plot_timestamp_timeline(df: pd.DataFrame) -> str:
+        """
+        Plot the timestamp timeline
+        Args:
+            df (pd.DataFrame): The dataframe
+
+        Returns:
+            str: The plot in HTML
+        """
+        fig = go.Figure()
+        y_values = range(len(df))
+
+        for i, row in df.iterrows():
+            y_value = y_values[i]
+            for col in df.columns[1:]:
+                if not pd.isna(row[col]):
+                    fig.add_trace(
+                        go.Scatter(
+                            x=[row[col]],
+                            y=[y_value],
+                            mode="markers",
+                            marker=dict(size=10),
+                            name=f"{col}",
+                            hovertemplate="%{x}<br>%{fullData.name}<extra></extra>",
+                        )
+                    )
+            # break
+        # Customize the layout
+        fig.update_layout(
+            title="Timeline of Time Points",
+            xaxis_title="Time",
+            # show nothing of y, even the label
+            yaxis=dict(
+                showticklabels=False, showline=False, showgrid=False, zeroline=True
+            ),
+            showlegend=True,
+        )
+        # Convert Plotly figure to HTML
+        plot_html = fig.to_html(full_html=False)
+        return plot_html
+
+
+ + + +
+ + + + + + + + + +
+ + +

+ __init__(benchmark_cluster=CLUSTER_Q_ETE_CONVERSATION_NAME) + +

+ + +
+ +

Initialize the benchmark +Args: + benchmark_cluster (str): The benchmark cluster

+ +
+ Source code in API/orchestrator/metrics/latency_benchmark.py +
def __init__(self, benchmark_cluster: str = CLUSTER_Q_ETE_CONVERSATION_NAME):
+    """
+    Initialize the benchmark
+    Args:
+        benchmark_cluster (str): The benchmark cluster
+    """
+    # if it is a specific name, gather this metric, otherwise, report all existing cluster
+    self.benchmark_cluster = benchmark_cluster
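A minimal construction sketch; the class name LatencyBenchmark and the import paths are assumptions inferred from the file path API/orchestrator/metrics/latency_benchmark.py, since only the source file is shown here:
from orchestrator.metrics.latency_benchmark import LatencyBenchmark  # assumed module path
from orchestrator.chain.clusters import CLUSTER_Q_ETE_CONVERSATION_NAME  # assumed module path

# benchmark one registered pipeline cluster
benchmark = LatencyBenchmark(benchmark_cluster=CLUSTER_Q_ETE_CONVERSATION_NAME)

# or report across every cluster registered in CLUSTERS
benchmark_all = LatencyBenchmark(benchmark_cluster="all")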
+
+
+
+ +
+ +
+ + +

+ plot_distribution(df, title='') + + + staticmethod + + +

+ + +
+ +

Plot the distribution of the latency +Args: + df (pd.DataFrame): The dataframe + title (str): The title

+ + +

Returns:

+ + + + + + + + + + + + + +
Name TypeDescription
str + str + +
+

The plot in HTML

+
+
+ +
+ Source code in API/orchestrator/metrics/latency_benchmark.py +
@staticmethod
+def plot_distribution(df: pd.DataFrame, title: str = "") -> str:
+    """
+    Plot the distribution of the latency
+    Args:
+        df (pd.DataFrame): The dataframe
+        title (str): The title
+
+    Returns:
+        str: The plot in HTML
+    """
+    # plot the distribution for each column
+    # Calculate mean and max for each latency column
+    mean_latencies = df[df.columns[1:]].mean()
+    max_latencies = df[df.columns[1:]].max()
+    min_latencies = df[df.columns[1:]].min()
+
+    # Create a Plotly figure
+    fig = go.Figure()
+    # Add min latencies to the figure
+    fig.add_trace(
+        go.Bar(x=min_latencies.index, y=min_latencies.values, name="Min Latency")
+    )
+    # Add mean latencies to the figure
+    fig.add_trace(
+        go.Bar(x=mean_latencies.index, y=mean_latencies.values, name="Mean Latency")
+    )
+
+    # Add max latencies to the figure
+    fig.add_trace(
+        go.Bar(x=max_latencies.index, y=max_latencies.values, name="Max Latency")
+    )
+
+    # Customize the layout
+    fig.update_layout(
+        title={
+            "text": "Latency Distribution" + title,
+            "x": 0.5,
+            "xanchor": "center",
+            "yanchor": "top",
+        },
+        xaxis_title="Component and Latency",
+        yaxis_title="Latency (s)",
+        barmode="group",
+        margin=dict(l=10, r=10, b=0),
+    )
+
+    # Convert Plotly figure to HTML
+    plot_html = fig.to_html(full_html=False)
+    return plot_html
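Because plot_distribution is a static method that only needs a DataFrame whose first column is an identifier and whose remaining columns are numeric latencies in seconds, it can be tried in isolation; the column names and numbers below are invented for illustration, and LatencyBenchmark is the assumed class name:
import pandas as pd

df = pd.DataFrame(
    {
        "track_id": ["a1", "b2", "c3"],
        "speech2text_model_latency": [0.82, 0.91, 0.77],
        "speech2text_overall_latency": [1.10, 1.25, 0.98],
    }
)
html = LatencyBenchmark.plot_distribution(df, title=" (demo)")  # returns a <div> of embedded Plotly HTML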
+
+
+
+ +
+ +
+ + +

+ plot_stacked_timeline(df, title) + + + staticmethod + + +

+ + +
+ +

Plot the stacked timeline +Args: + df (pd.DataFrame): The dataframe + title (str): The title

+

Returns:

+ +
+ Source code in API/orchestrator/metrics/latency_benchmark.py +
@staticmethod
+def plot_stacked_timeline(df: pd.DataFrame, title: str) -> str:
+    """
+    Plot the stacked timeline
+    Args:
+        df (pd.DataFrame): The dataframe
+        title (str): The title
+
+    Returns:
+        str: The plot in HTML
+    """
+    # Create a Plotly figure
+    fig = go.Figure()
+    # get the track id to be the stacked one
+    df["track_id"] = df["track_id"].str.split("-").str[-1]
+    # Add a trace for each component
+    for col in df.columns[1:]:
+        fig.add_trace(
+            go.Bar(
+                y=df["track_id"],
+                x=df[col],
+                name=col,
+                orientation="h",
+                hovertemplate="%{x}<br>%{fullData.name}<extra></extra>",
+            )
+        )
+
+    # Customize the layout
+    fig.update_layout(
+        title={
+            "text": f"Time Interval in Seconds ({title})",
+            "x": 0.5,
+            "xanchor": "center",
+            "yanchor": "top",
+        },
+        xaxis_title="Relative in Seconds to Start Time",
+        yaxis_title="Track ID",
+        barmode="stack",
+        height=max((len(df) * 35), 300),
+    )
+
+    # Convert Plotly figure to HTML
+    plot_html = fig.to_html(full_html=False)
+    return plot_html
+
+
+
+ +
+ +
+ + +

+ plot_table(df, title='') + + + staticmethod + + +

+ + +
+ +

Plot the table +Args: + df (pd.DataFrame): The dataframe + title (str): The title

+ + +

Returns:

+ + + + + + + + + + + + + +
Name TypeDescription
str + str + +
+

The plot in HTML

+
+
+ +
+ Source code in API/orchestrator/metrics/latency_benchmark.py +
@staticmethod
+def plot_table(df: pd.DataFrame, title: str = "") -> str:
+    """
+    Plot the table
+    Args:
+        df (pd.DataFrame): The dataframe
+        title (str): The title
+
+    Returns:
+        str: The plot in HTML
+    """
+    colors = []
+    for col in df.columns:
+        col_colors = []
+        for val in df[col]:
+            if isinstance(val, float) or isinstance(val, int):
+                col_colors.append("lavender")
+            else:
+                if val == "missing":
+                    col_colors.append("lightcoral")
+                elif val == "started":
+                    col_colors.append("lightyellow")
+                elif val == "failed":
+                    col_colors.append("lightcoral")
+                elif val == "pending":
+                    col_colors.append("lightblue")
+                elif val == "incomplete":
+                    col_colors.append("lightgrey")
+                else:
+                    col_colors.append("lightgreen")
+        colors.append(col_colors)
+    # Create a Plotly table
+    fig = go.Figure(
+        data=[
+            go.Table(
+                header=dict(
+                    values=[
+                        (
+                            [f"<b>{c.upper()}</b>" for c in col]
+                            if isinstance(col, tuple)
+                            else f"<b>{col.upper()}</b>"
+                        )
+                        for col in df.columns
+                    ],
+                    fill_color="paleturquoise",
+                    align="left",
+                ),
+                cells=dict(
+                    values=[df[col] for col in df.columns],
+                    fill_color=colors,
+                    align="left",
+                ),
+            )
+        ]
+    )
+    fig.update_layout(
+        title={
+            "text": f"Latency Summary: {title}",
+            "x": 0.5,
+            "xanchor": "center",
+            "yanchor": "top",
+        },
+        #     update margin to be 0
+        margin=dict(l=10, r=10, b=0),
+        # get the height to be whatever it requires
+        height=max((len(df) * 35), 300),
+    )
+    # Update layout for better appearance
+    desc_html = fig.to_html(full_html=False)
+    return desc_html
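plot_table colours numeric cells lavender and maps the status strings used elsewhere in this module ("missing", "started", "failed", "pending", "incomplete") to their own colours, so a quick way to see the output is to feed it a small mixed DataFrame (values invented, class name assumed):
import pandas as pd

df = pd.DataFrame(
    {
        "component": ["speech2text", "emotion_detection"],
        "model_latency": [0.82, "missing"],
        "overall_latency": [1.10, "incomplete"],
    }
)
html = LatencyBenchmark.plot_table(df, title="demo cluster")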
+
+
+
+ +
+ +
+ + +

+ plot_timestamp_timeline(df) + + + staticmethod + + +

+ + +
+ +

Plot the timestamp timeline +Args: + df (pd.DataFrame): The dataframe

+ + +

Returns:

+ + + + + + + + + + + + + +
Name TypeDescription
str + str + +
+

The plot in HTML

+
+
+ +
+ Source code in API/orchestrator/metrics/latency_benchmark.py +
@staticmethod
+def plot_timestamp_timeline(df: pd.DataFrame) -> str:
+    """
+    Plot the timestamp timeline
+    Args:
+        df (pd.DataFrame): The dataframe
+
+    Returns:
+        str: The plot in HTML
+    """
+    fig = go.Figure()
+    y_values = range(len(df))
+
+    for i, row in df.iterrows():
+        y_value = y_values[i]
+        for col in df.columns[1:]:
+            if not pd.isna(row[col]):
+                fig.add_trace(
+                    go.Scatter(
+                        x=[row[col]],
+                        y=[y_value],
+                        mode="markers",
+                        marker=dict(size=10),
+                        name=f"{col}",
+                        hovertemplate="%{x}<br>%{fullData.name}<extra></extra>",
+                    )
+                )
+        # break
+    # Customize the layout
+    fig.update_layout(
+        title="Timeline of Time Points",
+        xaxis_title="Time",
+        # show nothing of y, even the label
+        yaxis=dict(
+            showticklabels=False, showline=False, showgrid=False, zeroline=True
+        ),
+        showlegend=True,
+    )
+    # Convert Plotly figure to HTML
+    plot_html = fig.to_html(full_html=False)
+    return plot_html
+
+
+
+ +
+ +
+ + +

+ plot_timestamp_timeline_depth(df, title) + + + staticmethod + + +

+ + +
+ +

Plot the timestamp timeline +Args: + df (pd.DataFrame): The dataframe + title (str): The title

+ + +

Returns:

+ + + + + + + + + + + + + +
Name TypeDescription
str + str + +
+

The plot in HTML

+
+
+ +
+ Source code in API/orchestrator/metrics/latency_benchmark.py +
@staticmethod
+def plot_timestamp_timeline_depth(df: pd.DataFrame, title: str) -> str:
+    """
+    Plot the timestamp timeline
+    Args:
+        df (pd.DataFrame): The dataframe
+        title (str): The title
+
+    Returns:
+        str: The plot in HTML
+    """
+    fig = go.Figure()
+    y_values = list(range(len(df)))
+    shapes = []
+    for y_value in y_values:
+        shapes.append(
+            dict(
+                type="line",
+                xref="paper",
+                x0=0,
+                x1=1,
+                yref="y",
+                y0=y_value,
+                y1=y_value,
+                line=dict(color="grey", width=1, dash="dot"),
+            )
+        )
+    y_labels = []
+
+    legend_added = {}
+    # Use Plotly's qualitative color sequence 'Dark24' to generate a spectrum of colors
+    colors = pcolors.qualitative.Dark24
+
+    # Dynamically generate a color map for each column
+    column_colors = {
+        col: colors[i % len(colors)] for i, col in enumerate(df.columns[1:])
+    }
+    for i, row in df.iterrows():
+        y_value = y_values[i]
+        y_labels.append(row["track_id"].split("-")[-1])
+        for col in df.columns[1:]:
+            if not pd.isna(row[col]):
+                show_legend = False
+                if col not in legend_added:
+                    show_legend = True
+                    legend_added[col] = True
+                fig.add_trace(
+                    go.Scatter(
+                        x=[row[col]],
+                        y=[y_value],
+                        mode="markers",
+                        marker=dict(size=10, color=column_colors[col]),
+                        name=f"{col}",
+                        hovertemplate="%{x}<br>%{fullData.name}<extra></extra>",
+                        showlegend=show_legend,
+                    )
+                )
+    # Customize the layout
+    fig.update_layout(
+        title={
+            "text": f"Timeline of Events ({title})",
+            "x": 0.5,
+            "xanchor": "center",
+            "yanchor": "top",
+        },
+        xaxis_title="Time",
+        yaxis=dict(
+            showline=False,
+            showgrid=True,
+            zeroline=False,
+            tickvals=y_values,
+            ticktext=y_labels,
+            title="Track ID",
+        ),
+        showlegend=True,
+        shapes=shapes,
+        height=max((len(df) * 35), 300),
+    )
+    # Convert Plotly figure to HTML
+    plot_html = fig.to_html(full_html=False)
+    return plot_html
+
+
+
+ +
+ +
+ + +

+ process_cluster(cluster_name) + +

+ + +
+ +

Process the cluster +Args: + cluster_name (str): The cluster name

+ +
+ Source code in API/orchestrator/metrics/latency_benchmark.py +
def process_cluster(self, cluster_name: str):
+    """
+    Process the cluster
+    Args:
+        cluster_name (str): The cluster name
+
+    Returns:
+        str: The HTML summary for the cluster
+    """
+    task_groups, required_tasks_count, tasks = extract_task_group(cluster_name)
+    general_desc = f"<h2>Cluster: {cluster_name}</h2>"
+    general_desc += f"<p>Required tasks: {required_tasks_count} | Total tasks groups: {len(task_groups)}</p>"
+    # loop through the task groups, if the success task is not == required_tasks_count, then we will skip
+    success_pipeline = 0
+    cluster_latency = []
+    for track_id, task_group in task_groups.items():
+        success_tasks = [
+            task for task in task_group if task.result_status == "completed"
+        ]
+        if len(success_tasks) != required_tasks_count:
+            # the pipeline is not completed, so we will skip
+            continue
+        success_pipeline += 1
+        cluster_latency.append(self.process_task_group(task_group))
+
+    logger.info(
+        f"""
+            Cluster: {cluster_name}, Success Ratio: {success_pipeline}/{len(task_groups)}
+            Required Components: {required_tasks_count}, Total tasks: {len(tasks)}
+        """
+    )
+
+    general_title = f"Cluster: <b>{cluster_name}</b>, Completed Ratio: {success_pipeline}/{len(task_groups)}"
+    # flatten the cluster_latency
+    result_df = pd.DataFrame(cluster_latency)
+    # get the column split with _ from right, and left element is the component name
+
+    if len(result_df) != 0:
+        logger.debug(result_df.describe())
+        # result_df.to_csv(settings.LOG_DIR / f"{cluster_name}_benchmark.csv")
+        # to html and return it
+        desc = result_df.describe().transpose()
+        desc = desc.round(4)
+
+        # add another column
+        # Extract the latency type from the index and add it as a new column
+        desc["latency_type"] = desc.index.str.rsplit("_", n=2).str[1]
+        # then update the index to two columns, first will be component
+        desc.index = desc.index.str.rsplit("_", n=2, expand=True).get_level_values(
+            0
+        )
+        # reset index, get the index to be the column component
+        desc = desc.reset_index()
+        # rename the index to be component
+        desc = desc.rename(columns={"index": "component"})
+        desc_html = self.plot_table(desc, title=f" ({general_title})")
+        plot_html = self.plot_distribution(result_df, title=f" ({general_title})")
+
+        return general_desc + desc_html + plot_html
+    return general_desc
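The describe() reshaping above relies on the latency column names always ending in _model_latency, _transfer_latency or _overall_latency; a pandas-only sketch of that step, with invented column names:
import pandas as pd

desc = pd.DataFrame(
    {"mean": [0.83, 1.11]},
    index=["speech2text_model_latency", "speech2text_overall_latency"],
)
desc["latency_type"] = desc.index.str.rsplit("_", n=2).str[1]  # -> "model", "overall"
desc.index = desc.index.str.rsplit("_", n=2, expand=True).get_level_values(0)  # -> "speech2text"
desc = desc.reset_index().rename(columns={"index": "component"})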
+
+
+
+ +
+ +
+ + +

+ process_cluster_detail(cluster_name) + +

+ + +
+ +

Process the cluster in detail +Even if the track is not finished, we will still plot it and its current status +Args: + cluster_name (str): The cluster name

+

Returns:

+ +
+ Source code in API/orchestrator/metrics/latency_benchmark.py +
def process_cluster_detail(self, cluster_name: str) -> str:
+    """
+    Process the cluster in detail
+    Even if the track is not finished, we will still plot it and its current status
+    Args:
+        cluster_name (str): The cluster name
+
+    Returns:
+        str: The HTML report for the cluster
+    """
+    task_groups, required_tasks_count, tasks = extract_task_group(cluster_name)
+    general_desc = f"<h2>Cluster: {cluster_name}</h2>"
+    general_desc += f"<p>Required tasks: {required_tasks_count} | Total tasks groups: {len(task_groups)}</p>"
+    # loop through the task groups and count how many pipelines completed all required tasks
+    success_pipeline = 0
+    cluster_latency = []
+    cluster_ts_latency = []
+    cluster_tp_latency = []
+    for track_id, task_group in task_groups.items():
+        success_tasks = [
+            task for task in task_group if task.result_status == "completed"
+        ]
+        if len(success_tasks) == required_tasks_count:
+            # every required task completed, so count this pipeline as a success
+            success_pipeline += 1
+        cluster_latency.append(self.process_task_group_detail(task_group))
+        cluster_ts_latency.append(
+            self.process_task_group_detail_timeline(task_group)
+        )
+        cluster_tp_latency.append(
+            self.process_task_group_detail_timeline(task_group, timeline=True)
+        )
+    general_title = f"Cluster: <b>{cluster_name}</b>, Completed Ratio: {success_pipeline}/{len(task_groups)}"
+    result_df = pd.DataFrame(cluster_latency)
+    if len(result_df) == 0:
+        return general_desc
+
+    # only keep the last element in the track_id
+    result_df["track_id"] = result_df["track_id"].str.split("-").str[-1]
+    # get result into multiple level column, which will split current column into multiple level column name
+    # Split the column names into three parts, but only keep the first two
+    split_columns = result_df.columns.str.rsplit("_", n=2, expand=True)
+
+    # we only need the first two level, so we will get the first two level
+    result_df.columns = [
+        split_columns.get_level_values(0),
+        split_columns.get_level_values(1),
+    ]
+    # sort the column
+    track_tasks_html = self.plot_table(result_df, title=f" ({general_title})")
+
+    # cluster ts latency
+    result_ts_df = pd.DataFrame(cluster_ts_latency)
+    # result_ts_df.to_csv(settings.LOG_DIR / f"{cluster_name}_ts_benchmark.csv")
+    if len(result_ts_df) == 0:
+        return track_tasks_html
+    # we will plot a bar
+    ts_stacked_html = self.plot_stacked_timeline(result_ts_df, title=general_title)
+
+    # grab the time point latency, and try to draw time point html
+    result_tp_df = pd.DataFrame(cluster_tp_latency)
+    # result_tp_df.to_csv(settings.LOG_DIR / f"{cluster_name}_tp_benchmark.csv")
+    ts_timepoint_html = self.plot_timestamp_timeline_depth(
+        result_tp_df, title=general_title
+    )
+    return general_desc + track_tasks_html + ts_stacked_html + ts_timepoint_html
+
+
+
+ +
+ +
+ + +

+ process_task_group(task_track) + + + staticmethod + + +

+ + +
+ +

This will process each component, and then extract the transfer and model latency total

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
task_track + List[Task] + +
+

The task track

+
+
+ required +
+ + +

Returns:

+ + + + + + + + + + + + + +
Name TypeDescription
dict + +
+

The benchmark result

+
+
+ +
+ Source code in API/orchestrator/metrics/latency_benchmark.py +
@staticmethod
+def process_task_group(task_track: List[Task]):
+    """
+    This will process each component, and then extract the transfer and model latency total
+
+    Args:
+        task_track (List[Task]): The task track
+
+    Returns:
+        dict: The benchmark result
+    """
+    result = {
+        "track_id": task_track[0].track_id,
+    }
+    task_names = get_task_names_order(result["track_id"])
+    for task in task_track:
+        latency_profile = task.result_json.get("latency_profile", {})
+        # NOTE: this will require client side do not log overlap durations
+        model_latency = 0
+        transfer_latency = 0
+        logger.debug(latency_profile)
+        task_start_time = None
+        task_end_time = None
+        for key, value in latency_profile.items():
+            if key.startswith("model"):
+                model_latency += float(value)
+            if key.startswith("transfer"):
+                transfer_latency += float(value)
+            if key.startswith("ts"):
+                if key == "ts_start_task":
+                    task_start_time = value
+                if key == "ts_end_task":
+                    task_end_time = value
+        result[f"{task.task_name}_model_latency"] = model_latency
+        result[f"{task.task_name}_transfer_latency"] = transfer_latency
+        # look for the ts_start_task and ts_end_task, and the overall_latency should be that value
+        # process time into datetime object
+        # ts_end_trigger_emotion_model 2024-07-01T14:58:36.419352
+        if task_start_time and task_end_time:
+            task_start_time_dt = str_to_datetime(task_start_time)
+            task_end_time_dt = str_to_datetime(task_end_time)
+            result[f"{task.task_name}_overall_latency"] = (  # noqa
+                task_end_time_dt - task_start_time_dt
+            ).total_seconds()
+
+        else:
+            logger.error(f"Task {task.task_name} does not have start and end time")
+            result[f"{task.task_name}_overall_latency"] = (
+                model_latency + transfer_latency
+            )
+    # total_latency should be the sum of all the overall_latency
+    total_latency = 0
+    for key, value in result.items():
+        if key.endswith("overall_latency"):
+            total_latency += value
+    result["total_latency"] = total_latency
+    # loop all value, get it to decimal 4
+    for key, value in result.items():
+        if isinstance(value, float):
+            result[key] = round(value, 4)
+
+    ordered_result = {
+        "track_id": result["track_id"],
+    }
+    for task_name in task_names:
+        ordered_result[task_name + "_model_latency"] = result[
+            task_name + "_model_latency"
+        ]
+        ordered_result[task_name + "_transfer_latency"] = result[
+            task_name + "_transfer_latency"
+        ]
+        ordered_result[task_name + "_overall_latency"] = result[
+            task_name + "_overall_latency"
+        ]
+    ordered_result["total_latency"] = result["total_latency"]
+    return ordered_result
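Each latency_profile consumed above is a flat dict whose model_* and transfer_* entries are duration strings and whose ts_* entries are timestamps; a self-contained sketch of the arithmetic (key names and numbers are invented, only the prefixes and the timestamp format come from the code):
from datetime import datetime

latency_profile = {
    "model_transcribe": "0.8213",
    "transfer_audio_upload": "0.1042",
    "ts_start_task": "2024-07-01T14:58:35.100000",
    "ts_end_task": "2024-07-01T14:58:36.419352",
}

model_latency = sum(float(v) for k, v in latency_profile.items() if k.startswith("model"))
transfer_latency = sum(float(v) for k, v in latency_profile.items() if k.startswith("transfer"))

fmt = "%Y-%m-%dT%H:%M:%S.%f"
overall_latency = (
    datetime.strptime(latency_profile["ts_end_task"], fmt)
    - datetime.strptime(latency_profile["ts_start_task"], fmt)
).total_seconds()  # 1.3194 s, stored as {task_name}_overall_latency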
+
+
+
+ +
+ +
+ + +

+ process_task_group_detail(task_track) + + + staticmethod + + +

+ + +
+ +

This will process each component, and then extract the transfer and model latency total

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
task_track + List[Task] + +
+

The task track

+
+
+ required +
+ + +

Returns:

+ + + + + + + + + + + + + +
Name TypeDescription
dict + +
+

The benchmark result

+
+
+ +
+ Source code in API/orchestrator/metrics/latency_benchmark.py +
@staticmethod
+def process_task_group_detail(task_track: List[Task]):
+    """
+    This will process each component, and then extract the transfer and model latency total
+
+    Args:
+        task_track (List[Task]): The task track
+
+    Returns:
+        dict: The benchmark result
+    """
+    result = {
+        "track_id": task_track[0].track_id,
+    }
+    task_names = get_task_names_order(result["track_id"])
+    for task in task_track:
+        if task.result_status != "completed":
+            result[f"{task.task_name}_model_latency"] = task.result_status
+            result[f"{task.task_name}_transfer_latency"] = task.result_status
+            result[f"{task.task_name}_overall_latency"] = task.result_status
+            continue
+        latency_profile = task.result_json.get("latency_profile", {})
+        # NOTE: this will require client side do not log overlap durations
+        model_latency = 0
+        transfer_latency = 0
+        logger.debug(latency_profile)
+        task_start_time = None
+        task_end_time = None
+        for key, value in latency_profile.items():
+            if key.startswith("model"):
+                model_latency += float(value)
+            if key.startswith("transfer"):
+                transfer_latency += float(value)
+            if key.startswith("ts"):
+                if key == "ts_start_task":
+                    task_start_time = value
+                if key == "ts_end_task":
+                    task_end_time = value
+        result[f"{task.task_name}_model_latency"] = model_latency
+        result[f"{task.task_name}_transfer_latency"] = transfer_latency
+        # look for the ts_start_task and ts_end_task, and the overall_latency should be that value
+        # process time into datetime object
+        # ts_end_trigger_emotion_model 2024-07-01T14:58:36.419352
+        if task_start_time and task_end_time:
+            task_start_time_dt = str_to_datetime(task_start_time)
+            task_end_time_dt = str_to_datetime(task_end_time)
+            result[f"{task.task_name}_overall_latency"] = (  # noqa
+                task_end_time_dt - task_start_time_dt
+            ).total_seconds()
+
+        else:
+            logger.error(f"Task {task.task_name} does not have start and end time")
+            result[f"{task.task_name}_overall_latency"] = (
+                model_latency + transfer_latency
+            )
+
+    # sort the key to be the same as the cluster order, also if missed, fill it with missing
+    for task_name in task_names:
+        if f"{task_name}_overall_latency" not in result:
+            result[task_name + "_model_latency"] = "missing"
+            result[task_name + "_transfer_latency"] = "missing"
+            result[task_name + "_overall_latency"] = "missing"
+
+    # total_latency should be the sum of all the overall_latency
+    total_latency = 0
+    for key, value in result.items():
+        if key.endswith("overall_latency") and isinstance(value, float):
+            total_latency += value
+        elif key.endswith("overall_latency") and not isinstance(value, float):
+            total_latency = "incomplete"
+            break
+    result["total_latency"] = total_latency
+    # loop all value, get it to decimal 4
+    for key, value in result.items():
+        if isinstance(value, float):
+            result[key] = round(value, 4)
+
+    ordered_result = {
+        "track_id": result["track_id"],
+    }
+    for task_name in task_names:
+        ordered_result[task_name + "_model_latency"] = result[
+            task_name + "_model_latency"
+        ]
+        ordered_result[task_name + "_transfer_latency"] = result[
+            task_name + "_transfer_latency"
+        ]
+        ordered_result[task_name + "_overall_latency"] = result[
+            task_name + "_overall_latency"
+        ]
+
+    ordered_result["total_latency"] = result["total_latency"]
+    return ordered_result
+
+
+
+ +
+ +
+ + +

+ process_task_group_detail_timeline(task_track, timeline=False) + + + staticmethod + + +

+ + +
+ +

Based on the result_json => latency_profile +We will gather the time point for each, and then change to the relative second value compared to start point

+

If timeline is True, we will only grab the timestamp information. +Otherwise, we will calculate the relative time to the start point

+

In the end, we will grab the +Args: + task_track (List[Task]): The task track + timeline (bool): If we want to plot the timeline

+

Returns:

+ +
+ Source code in API/orchestrator/metrics/latency_benchmark.py +
@staticmethod
+def process_task_group_detail_timeline(
+    task_track: List[Task], timeline: bool = False
+):
+    """
+    Based on the result_json => latency_profile
+    We will gather the time point for each, and then change to the relative second value compared to start point
+
+    If timeline is True, we will only grab the timestamp information.
+    Otherwise, we will calculate the relative time to the start point
+
+    In the end, we will return the gathered time points keyed by task name
+    Args:
+        task_track (List[Task]): The task track
+        timeline (bool): If we want to plot the timeline
+
+    Returns:
+        dict: The time points per task (relative seconds by default, raw timestamps when timeline is True)
+    """
+    result = {
+        "track_id": task_track[0].track_id,
+    }
+
+    task_names = get_task_names_order(result["track_id"])
+
+    task_results = {}
+    for task in task_track:
+        if task.result_status != "completed":
+            continue
+        latency_profile = task.result_json.get("latency_profile", {})
+        task_result = {}
+        for key, value in latency_profile.items():
+            if key.startswith("ts"):
+                task_result[key] = str_to_datetime(value)
+
+        if timeline is False:
+            # sort out the whole task_result based on time timestamp
+            # and then calculate the relative time to the previous component
+            sorted_task_result = dict(
+                sorted(task_result.items(), key=lambda item: item[1])
+            )
+            previous_time = None
+            task_relative_time = {}
+            for key, value in sorted_task_result.items():
+                if previous_time is None:
+                    task_relative_time[key] = 0
+                else:
+                    task_relative_time[key] = (
+                        value - previous_time
+                    ).total_seconds()
+                previous_time = value
+            task_results[task.task_name] = task_relative_time
+        else:
+            task_results[task.task_name] = task_result
+
+    # sort the key to be the same as the cluster order, calculate the value to add up the previous component
+    first_start_task = None
+    for task_name in task_names:
+        if task_name not in task_results:
+            break
+        for key, value in task_results[task_name].items():
+            new_key = f"{task_name}_{key.split('_', 1)[1]}"
+            if key == "ts_start_task":
+                if first_start_task is None:
+                    first_start_task = value
+                else:
+                    continue
+            if new_key not in result:
+                result[new_key] = value
+
+    return result
+
+
+
+ +
+ +
+ + +

+ run() + +

+ + +
+ +

Run the benchmark

+ +
+ Source code in API/orchestrator/metrics/latency_benchmark.py +
def run(self):
+    """
+    Run the benchmark
+    """
+    html_content = ""
+    if self.benchmark_cluster == "all":
+        for cluster_name in CLUSTERS.keys():
+            # add a divider
+            html_content += "<hr>"
+            html_content += self.process_cluster(cluster_name)
+    else:
+        if self.benchmark_cluster not in CLUSTERS:
+            raise ValueError(f"Cluster {self.benchmark_cluster} not found")
+        html_content += "<hr>"
+        html_content += self.process_cluster(self.benchmark_cluster)
+    return html_content
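A sketch of producing and persisting the report from a Django shell or management command (class name and import path are assumptions; run() itself only returns the concatenated HTML string):
benchmark = LatencyBenchmark(benchmark_cluster="all")
html_report = benchmark.run()
with open("latency_report.html", "w") as f:
    f.write(html_report)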
+ + + + + + + + + + \ No newline at end of file diff --git a/Sources/API/orchestrator/metrics/utils/index.html b/Sources/API/orchestrator/metrics/utils/index.html new file mode 100644 index 00000000..0c28b7ff --- /dev/null +++ b/Sources/API/orchestrator/metrics/utils/index.html @@ -0,0 +1,4938 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Utils - OpenOmni Framework by AI4WA + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Utils

+ +
+ + + + +
+ + + +
+ + + + + + + + + +
+ + +

+ extract_task_group(cluster_name) + +

+ + +
+ +

Extract the task group +Args: + cluster_name (str): The cluster name

+

Returns:

+ +
+ Source code in API/orchestrator/metrics/utils.py +
def extract_task_group(
+    cluster_name: str,
+) -> Tuple[Dict[str, List[Task]], int, List[Task]]:
+    """
+    Extract the task group
+    Args:
+        cluster_name (str): The cluster name
+
+    Returns:
+        Tuple[Dict[str, List[Task]], int, List[Task]]: task groups keyed by track_id, the required task count, and all matched tasks
+    """
+    cluster = CLUSTERS.get(cluster_name, None)
+    if cluster is None:
+        raise ValueError(f"Cluster {cluster_name} not found")
+
+    required_tasks = [
+        item for item in cluster.values() if item["component_type"] == "task"
+    ]
+    required_tasks_count = len(required_tasks)
+    logger.info(f"Cluster: {cluster_name}, Required tasks: {required_tasks_count}")
+    # get all related tasks, the track_id is like T-{cluster_name}-XXX
+    tasks = Task.objects.filter(track_id__startswith=f"T-{cluster_name}-")
+    logger.info(f"Cluster: {cluster_name}, Total tasks: {len(tasks)}")
+    # group the tasks by the track_id
+    task_groups = {}
+    for task in tasks:
+        track_id = task.track_id
+        if track_id not in task_groups:
+            task_groups[track_id] = []
+        task_groups[track_id].append(task)
+
+    # sort the task groups by the first task created time
+    task_groups = dict(
+        sorted(
+            task_groups.items(),
+            key=lambda x: x[1][0].created_at if len(x[1]) > 0 else None,
+        )
+    )
+    return task_groups, required_tasks_count, tasks
+
+
+
+ +
+ +
+ + +

+ get_task_names_order(track_id) + +

+ + +
+ +

Get the task names order +Args: + track_id (str): The track ID

+ + +

Returns:

+ + + + + + + + + + + + + +
Name TypeDescription
str + List[str] + +
+

The task names order

+
+
+ +
+ Source code in API/orchestrator/metrics/utils.py +
def get_task_names_order(track_id: str) -> List[str]:
+    """
+    Get the task names order
+    Args:
+        track_id (str): The track ID
+
+    Returns:
+        List[str]: The task names order
+
+    """
+    cluster_name = track_id.split("-")[1]
+    cluster = CLUSTERS.get(cluster_name)
+    task_name_order = [
+        item for item in cluster.values() if item["component_type"] == "task"
+    ]
+    task_name_order = sorted(task_name_order, key=lambda x: x["order"])
+    task_names = [item["task_name"] for item in task_name_order]
+    return task_names
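The function leans on the CLUSTERS registry defined in orchestrator/chain/clusters.py; the entry below is an invented illustration of the shape it expects (component_type, order and task_name are the only fields actually read):
cluster = {
    "speech2text": {"component_type": "task", "order": 0, "task_name": "openai_speech2text"},
    "llm": {"component_type": "task", "order": 1, "task_name": "openai_gpt_35"},
    "text2speech": {"component_type": "task", "order": 2, "task_name": "openai_text2speech"},
}
ordered = sorted(
    (item for item in cluster.values() if item["component_type"] == "task"),
    key=lambda x: x["order"],
)
print([item["task_name"] for item in ordered])
# ['openai_speech2text', 'openai_gpt_35', 'openai_text2speech']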
+
+
+
+ +
+ +
+ + +

+ str_to_datetime(datetime_str) + +

+ + +
+ +

Convert the datetime string to datetime object +Args: + datetime_str (str): the string datetime, like this: 2024-07-01T14:58:36.419352

+ + +

Returns:

+ + + + + + + + + + + + + +
Name TypeDescription
datetime + datetime + +
+

The datetime object

+
+
+ +
+ Source code in API/orchestrator/metrics/utils.py +
def str_to_datetime(datetime_str: str) -> datetime:
+    """
+    Convert the datetime string to datetime object
+    Args:
+        datetime_str (str): the string datetime, like this: 2024-07-01T14:58:36.419352
+
+    Returns:
+        datetime: The datetime object
+    """
+    return datetime.strptime(datetime_str, "%Y-%m-%dT%H:%M:%S.%f")
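This matches the ISO-like microsecond format the latency_profile timestamps use; a quick check:
from datetime import datetime

ts = datetime.strptime("2024-07-01T14:58:36.419352", "%Y-%m-%dT%H:%M:%S.%f")
print(ts.year, ts.microsecond)  # 2024 419352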
+
+
+
+ +
+ + + +
+ +
+ +
+ + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/Sources/API/orchestrator/migrations/0001_init/index.html b/Sources/API/orchestrator/migrations/0001_init/index.html new file mode 100644 index 00000000..0debb323 --- /dev/null +++ b/Sources/API/orchestrator/migrations/0001_init/index.html @@ -0,0 +1,4575 @@ + + + + + + + + + + + + + + + + + + + + + 0001 init - OpenOmni Framework by AI4WA + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

0001 init

+ +
+ + + + +
+ + + +
+ + + + + + + + + + + +
+ +
+ +
+ + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/Sources/API/orchestrator/models/index.html b/Sources/API/orchestrator/models/index.html new file mode 100644 index 00000000..cf30a0a1 --- /dev/null +++ b/Sources/API/orchestrator/models/index.html @@ -0,0 +1,5306 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Models - OpenOmni Framework by AI4WA + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Models

+ +
+ + + + +
+ + + +
+ + + + + + + + +
+ + + +

+ Task + + +

+ + +
+

+ Bases: Model

+ + +
+ Source code in API/orchestrator/models.py +
class Task(models.Model):
+    user = models.ForeignKey(
+        User,
+        on_delete=models.SET_NULL,
+        null=True,
+        blank=True,
+        related_name="tasks",
+        help_text="Select the user",
+    )
+    name = models.CharField(
+        max_length=100, help_text="A unique name to track the cluster of tasks"
+    )
+
+    task_name = models.CharField(
+        max_length=100,
+        help_text="The name of the task",
+    )
+    parameters = models.JSONField(
+        default=dict,
+        blank=True,
+        null=True,
+        help_text="Enter the parameters for the task",
+    )
+    result_status = models.CharField(
+        max_length=100,
+        choices=[
+            ("pending", "Pending"),
+            ("completed", "Completed"),
+            ("failed", "Failed"),
+            ("started", "Started"),
+            ("cancelled", "Cancelled"),
+        ],
+        default="pending",
+    )
+    result_json = models.JSONField(
+        default=dict,
+        blank=True,
+        null=True,
+        help_text="The result of the task",
+    )
+    description = models.TextField(blank=True, null=True)
+    track_id = models.CharField(
+        max_length=100,
+        blank=True,
+        null=True,
+        help_text="The tracking ID of the task, will start with T-{cluster_name}-{id}",
+    )
+    created_at = models.DateTimeField(auto_now_add=True)
+    updated_at = models.DateTimeField(auto_now=True)
+
+    def __str__(self):
+        return self.name
+
+    @classmethod
+    def create_task(
+        cls,
+        user: Optional[User],
+        name: str,
+        task_name: str,
+        parameters: dict,
+        description: str = "",
+        track_id: Optional[str] = None,
+    ):
+        """
+        Create a task
+        Args:
+            user (User): The user who created the task
+            name (str): A unique name to track the cluster of tasks
+            task_name (str): The name of the task
+            parameters (dict): The parameters for the task
+            description (str): The description of the task
+            track_id (str): The tracking ID of the task, will start with T-{cluster_name}-{id}
+
+        Returns:
+            Task: The created task
+        """
+        task = cls(
+            user=user,
+            name=name,
+            task_name=task_name,
+            parameters=parameters,
+            description=description,
+            track_id=track_id,
+        )
+        task.save()
+        return task
+
+    @staticmethod
+    def init_track_id(name: str) -> str:
+        """
+        Initialize the track ID
+        Args:
+            name (str): The name of the task
+
+        Returns:
+            str: The track ID
+        """
+        uid = str(uuid4())
+        # replace the - with ""
+        uid = uid.replace("-", "")
+        return f"T-{name}-{uid}"
+
+    # override the save method, to call the chain
+    def save(self, *args, **kwargs):
+        # if it is updated, then we need to call the chain
+        if self.result_status == "completed":
+            completed_task.send(sender=self, data=self.__dict__)
+        super().save(*args, **kwargs)
+
+    @staticmethod
+    def get_task_name_choices():
+        """
+        Get dynamic task name choices
+        Returns:
+            list: List of tuples containing task name choices
+        """
+        # Here you can fetch the choices from an external source or database
+        return [
+            ("quantization_llm", "Quantization LLM"),
+            ("hf_llm", "HF LLM"),
+            ("emotion_detection", "Emotion Detection"),
+            ("speech2text", "Speech2Text"),
+            ("text2speech", "Text2Speech"),
+            ("general_ml", "General ML"),
+            ("openai_speech2text", "OpenAI Speech2Text"),
+            ("openai_gpt_4o", "OpenAI GPT4o"),
+            ("openai_gpt_35", "OpenAI GPT3.5"),
+            ("openai_text2speech", "OpenAI Text2Speech"),
+            ("openai_gpt_4o_text_and_image", "OpenAI GPT4o Text and Image"),
+            ("openai_gpt_4o_text_only", "OpenAI GPT4o Text"),
+            ("rag", "RAG"),
+        ]
+
+    @staticmethod
+    def task_ml_task_mapping() -> dict:
+        return {
+            "quantization_llm": "text_generation",
+            "hf_llm": "text_generation",
+            "emotion_detection": "emotion_detection",
+            "speech2text": "speech2text",
+            "text2speech": "text2speech",
+            "general_ml": "general_ml",
+            "openai_speech2text": "speech2text",
+            "openai_gpt_4o": "text_generation",
+            "openai_gpt_35": "text_generation",
+            "openai_text2speech": "text2speech",
+            "openai_gpt_4o_text_and_image": "text_generation",
+            "openai_gpt_4o_text_only": "text_generation",
+            "rag": "rag",
+        }
+
+
+ + + +
+ + + + + + + + + +
+ + +

+ create_task(user, name, task_name, parameters, description='', track_id=None) + + + classmethod + + +

+ + +
+ +

Create a task +Args: + user (User): The user who created the task + name (str): The name of the task + task_name (str): The name of the task + parameters (dict): The parameters for the task + description (str): The description of the task + track_id (str): The tracking ID of the task, will start with T-{cluster_name}-{id}

+

Returns:

+ +
+ Source code in API/orchestrator/models.py +
@classmethod
+def create_task(
+    cls,
+    user: Optional[User],
+    name: str,
+    task_name: str,
+    parameters: dict,
+    description: str = "",
+    track_id: Optional[str] = None,
+):
+    """
+    Create a task
+    Args:
+        user (User): The user who created the task
+        name (str): A unique name to track the cluster of tasks
+        task_name (str): The name of the task
+        parameters (dict): The parameters for the task
+        description (str): The description of the task
+        track_id (str): The tracking ID of the task, will start with T-{cluster_name}-{id}
+
+    Returns:
+        Task: The created task
+    """
+    task = cls(
+        user=user,
+        name=name,
+        task_name=task_name,
+        parameters=parameters,
+        description=description,
+        track_id=track_id,
+    )
+    task.save()
+    return task
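A hedged sketch of queuing a task from a Django shell (requires a configured database since save() is called, and the completed_task signal only fires once result_status becomes "completed"; the parameters payload is task-specific and invented here):
track_id = Task.init_track_id("CLUSTER_Q_ETE_CONVERSATION")  # cluster name is illustrative
task = Task.create_task(
    user=None,
    name="CLUSTER_Q_ETE_CONVERSATION",
    task_name="openai_speech2text",
    parameters={"audio_uid": "example"},  # assumed payload shape
    track_id=track_id,
)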
+
+
+
+ +
+ +
+ + +

+ get_task_name_choices() + + + staticmethod + + +

+ + +
+ +

Get dynamic task name choices +Returns: + list: List of tuples containing task name choices

+ +
+ Source code in API/orchestrator/models.py +
@staticmethod
+def get_task_name_choices():
+    """
+    Get dynamic task name choices
+    Returns:
+        list: List of tuples containing task name choices
+    """
+    # Here you can fetch the choices from an external source or database
+    return [
+        ("quantization_llm", "Quantization LLM"),
+        ("hf_llm", "HF LLM"),
+        ("emotion_detection", "Emotion Detection"),
+        ("speech2text", "Speech2Text"),
+        ("text2speech", "Text2Speech"),
+        ("general_ml", "General ML"),
+        ("openai_speech2text", "OpenAI Speech2Text"),
+        ("openai_gpt_4o", "OpenAI GPT4o"),
+        ("openai_gpt_35", "OpenAI GPT3.5"),
+        ("openai_text2speech", "OpenAI Text2Speech"),
+        ("openai_gpt_4o_text_and_image", "OpenAI GPT4o Text and Image"),
+        ("openai_gpt_4o_text_only", "OpenAI GPT4o Text"),
+        ("rag", "RAG"),
+    ]
+
+
+
+ +
+ +
+ + +

+ init_track_id(name) + + + staticmethod + + +

+ + +
+ +

Initialize the track ID +Args: + name (str): The name of the task

+ + +

Returns:

+ + + + + + + + + + + + + +
Name TypeDescription
str + str + +
+

The track ID

+
+
+ +
+ Source code in API/orchestrator/models.py +
@staticmethod
+def init_track_id(name: str) -> str:
+    """
+    Initialize the track ID
+    Args:
+        name (str): The name of the task
+
+    Returns:
+        str: The track ID
+    """
+    uid = str(uuid4())
+    # replace the - with ""
+    uid = uid.replace("-", "")
+    return f"T-{name}-{uid}"
\ No newline at end of file
diff --git a/Sources/API/orchestrator/serializers/index.html b/Sources/API/orchestrator/serializers/index.html
new file mode 100644
index 00000000..7733307e
--- /dev/null
+++ b/Sources/API/orchestrator/serializers/index.html
@@ -0,0 +1,4635 @@

Serializers

\ No newline at end of file
diff --git a/Sources/API/orchestrator/tests/index.html b/Sources/API/orchestrator/tests/index.html
new file mode 100644
index 00000000..832d903c
--- /dev/null
+++ b/Sources/API/orchestrator/tests/index.html
@@ -0,0 +1,4619 @@

Tests

\ No newline at end of file
diff --git a/Sources/API/orchestrator/urls/index.html b/Sources/API/orchestrator/urls/index.html
new file mode 100644
index 00000000..49123c81
--- /dev/null
+++ b/Sources/API/orchestrator/urls/index.html
@@ -0,0 +1,4635 @@

URLs

\ No newline at end of file
diff --git a/Sources/API/orchestrator/views/index.html b/Sources/API/orchestrator/views/index.html
new file mode 100644
index 00000000..f836283b
--- /dev/null
+++ b/Sources/API/orchestrator/views/index.html
@@ -0,0 +1,5549 @@

Views

QueueTaskViewSet

Bases: ViewSet

A ViewSet for queuing AI tasks generally

Source code in API/orchestrator/views.py, lines 18-196:
class QueueTaskViewSet(viewsets.ViewSet):
+    """
+    A ViewSet for queuing AI tasks generally
+
+    """
+
+    # This ensures that only authenticated users can access these endpoints
+    permission_classes = [IsAuthenticated]
+
+    @swagger_auto_schema(
+        operation_summary="Queue an AI task",
+        operation_description="This will include LLM, STT, and other AI tasks",
+        request_body=TaskSerializer,
+        responses={200: "Task queued successfully"},
+    )
+    @action(detail=False, methods=["post"], permission_classes=[IsAuthenticated])
+    def ai_task(self, request):
+        """
+        Endpoint to queue tasks for AI Client side to run
+        """
+        data = request.data
+        serializer = TaskSerializer(data=data)
+
+        try:
+            serializer.is_valid(raise_exception=True)
+        except Exception as e:
+            logger.error(f"Error validating task request: {e}")
+            return Response(
+                {"error": f"Error validating task request: {e}"},
+                status=status.HTTP_400_BAD_REQUEST,
+            )
+
+        # if track id not set, set up the track id
+        track_id = data.get("track_id", None)
+        logger.info(f"Track ID: {track_id}")
+        # if track_id is not provided, then we need to generate it
+        if track_id is None:
+            track_id = Task.init_track_id(CLUSTER_Q_ETE_CONVERSATION_NAME)
+            logger.info(f"Generated track ID: {track_id}")
+            serializer.validated_data["track_id"] = track_id
+
+        # based on the track cluster name, determine what to do next
+        task_id = ClusterManager.chain_next(
+            track_id=track_id,
+            current_component="init",
+            next_component_params=serializer.validated_data["parameters"],
+            name=data.get("name", None),
+            user=request.user,
+        )
+
+        return Response(
+            {"message": "LLM task queued successfully", "task_id": task_id},
+            status=status.HTTP_200_OK,
+        )
+
+    @swagger_auto_schema(
+        operation_summary="Worker: Get Task",
+        operation_description="Get the task",
+        responses={200: "Task retrieved successfully"},
+    )
+    @action(
+        detail=False,
+        methods=["get"],
+        permission_classes=[IsAuthenticated],
+        url_path="task/(?P<task_name>.+)",
+        url_name="task",
+    )
+    def task(self, request, task_name="all"):
+        """
+        Endpoint to get the task for AI
+        """
+        cool_down_task = 10  # 10 seconds
+        cool_down_time = datetime.now() - timedelta(seconds=cool_down_task)
+        try:
+            if task_name == "all":
+                task = Task.objects.filter(
+                    result_status="pending", created_at__lte=cool_down_time
+                ).first()
+            else:
+                task = Task.objects.filter(
+                    task_name=task_name,
+                    result_status="pending",
+                    created_at__lte=cool_down_time,
+                ).first()
+            if task is None:
+                return Response(
+                    {"error": f"No pending {task_name} tasks found"},
+                    status=status.HTTP_404_NOT_FOUND,
+                )
+            task.result_status = "started"
+            task.save()
+            task_serializer = TaskSerializer(task)
+            logger.critical(f"Task {task.id} retrieved successfully")
+            return Response(data=task_serializer.data, status=status.HTTP_200_OK)
+        except Task.DoesNotExist:
+            return Response(
+                {"error": f"No pending {task_name} tasks found"},
+                status=status.HTTP_404_NOT_FOUND,
+            )
+
+    # add an endpoint to update the task result
+    @swagger_auto_schema(
+        operation_summary="Worker: Result Update",
+        operation_description="Update the task result",
+        request_body=TaskSerializer,
+        responses={200: "Task result updated successfully"},
+    )
+    @action(
+        detail=True,
+        methods=["post"],
+        permission_classes=[IsAuthenticated],
+        url_path="update_result",
+        url_name="update_result",
+    )
+    def update_result(self, request, pk=None):
+        """
+        Endpoint to update the result of a task.
+        """
+        try:
+            data = request.data
+            task = Task.objects.filter(id=pk).first()
+            if task is None:
+                return Response(
+                    {"error": f"Task with ID {pk} does not exist"},
+                    status=status.HTTP_404_NOT_FOUND,
+                )
+
+            serializer = TaskSerializer(data=data, instance=task, partial=True)
+            serializer.is_valid(raise_exception=True)
+            serializer.save()
+            return Response(
+                {"message": f"Task {task.id} updated successfully"},
+                status=status.HTTP_200_OK,
+            )
+        except Exception as e:
+            logger.error(f"Error updating task result: {e}")
+            logger.exception(e)
+            return Response(
+                {"error": f"Error updating task result: {e}"},
+                status=status.HTTP_400_BAD_REQUEST,
+            )
+
+    @swagger_auto_schema(
+        operation_summary="Worker: Register",
+        operation_description="Register a worker",
+        responses={200: "Worker registered or updated successfully"},
+        request_body=TaskWorkerSerializer,
+    )
+    @action(detail=False, methods=["post"], permission_classes=[IsAuthenticated])
+    def worker(self, request):
+        """
+        Endpoint to register a GPU worker.
+        """
+        data = request.data
+        serializer = TaskWorkerSerializer(data=data)
+        serializer.is_valid(raise_exception=True)
+
+        uuid = data.get("uuid")
+        mac_address = data.get("mac_address")
+        ip_address = data.get("ip_address")
+        task_name = data.get("task_name")
+
+        worker, created = TaskWorker.objects.get_or_create(
+            uuid=uuid,
+            defaults={
+                "mac_address": mac_address,
+                "ip_address": ip_address,
+                "task_name": task_name,
+            },
+        )
+        if not created:
+            worker.mac_address = mac_address
+            worker.ip_address = ip_address
+            worker.save()
+
+        return Response(
+            {"message": f"Worker {uuid} registered or updated successfully"},
+            status=status.HTTP_200_OK,
+        )
ai_task(request)

Endpoint to queue tasks for AI Client side to run

Source code in API/orchestrator/views.py, lines 27-71:
@swagger_auto_schema(
+    operation_summary="Queue an AI task",
+    operation_description="This will include LLM, STT, and other AI tasks",
+    request_body=TaskSerializer,
+    responses={200: "Task queued successfully"},
+)
+@action(detail=False, methods=["post"], permission_classes=[IsAuthenticated])
+def ai_task(self, request):
+    """
+    Endpoint to queue tasks for AI Client side to run
+    """
+    data = request.data
+    serializer = TaskSerializer(data=data)
+
+    try:
+        serializer.is_valid(raise_exception=True)
+    except Exception as e:
+        logger.error(f"Error validating task request: {e}")
+        return Response(
+            {"error": f"Error validating task request: {e}"},
+            status=status.HTTP_400_BAD_REQUEST,
+        )
+
+    # if track id not set, set up the track id
+    track_id = data.get("track_id", None)
+    logger.info(f"Track ID: {track_id}")
+    # if track_id is not provided, then we need to generate it
+    if track_id is None:
+        track_id = Task.init_track_id(CLUSTER_Q_ETE_CONVERSATION_NAME)
+        logger.info(f"Generated track ID: {track_id}")
+        serializer.validated_data["track_id"] = track_id
+
+    # based on the track cluster name, determine what to do next
+    task_id = ClusterManager.chain_next(
+        track_id=track_id,
+        current_component="init",
+        next_component_params=serializer.validated_data["parameters"],
+        name=data.get("name", None),
+        user=request.user,
+    )
+
+    return Response(
+        {"message": "LLM task queued successfully", "task_id": task_id},
+        status=status.HTTP_200_OK,
+    )
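A sketch of how a client might call this endpoint over HTTP. The host, token, and URL prefix are assumptions; the actual prefix depends on how the project's router registers this ViewSet:

import requests

API = "http://localhost:8000"                     # assumed host
HEADERS = {"Authorization": "Token <api-token>"}  # placeholder token

payload = {
    "name": "demo-conversation",
    "task_name": "openai_speech2text",
    "parameters": {"uid": "session-001", "audio_index": "0-10"},  # handler-specific keys
}
# "/queue_task/ai_task/" is an assumed route; check the project's urls.py for the real prefix.
resp = requests.post(f"{API}/queue_task/ai_task/", json=payload, headers=HEADERS)
print(resp.json())  # e.g. {"message": "LLM task queued successfully", "task_id": ...}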
task(request, task_name='all')

Endpoint to get the task for AI

Source code in API/orchestrator/views.py, lines 73-116:
@swagger_auto_schema(
+    operation_summary="Worker: Get Task",
+    operation_description="Get the task",
+    responses={200: "Task retrieved successfully"},
+)
+@action(
+    detail=False,
+    methods=["get"],
+    permission_classes=[IsAuthenticated],
+    url_path="task/(?P<task_name>.+)",
+    url_name="task",
+)
+def task(self, request, task_name="all"):
+    """
+    Endpoint to get the task for AI
+    """
+        cool_down_task = 10  # 10 seconds
+    cool_down_time = datetime.now() - timedelta(seconds=cool_down_task)
+    try:
+        if task_name == "all":
+            task = Task.objects.filter(
+                result_status="pending", created_at__lte=cool_down_time
+            ).first()
+        else:
+            task = Task.objects.filter(
+                task_name=task_name,
+                result_status="pending",
+                created_at__lte=cool_down_time,
+            ).first()
+        if task is None:
+            return Response(
+                {"error": f"No pending {task_name} tasks found"},
+                status=status.HTTP_404_NOT_FOUND,
+            )
+        task.result_status = "started"
+        task.save()
+        task_serializer = TaskSerializer(task)
+        logger.critical(f"Task {task.id} retrieved successfully")
+        return Response(data=task_serializer.data, status=status.HTTP_200_OK)
+    except Task.DoesNotExist:
+        return Response(
+            {"error": f"No pending {task_name} tasks found"},
+            status=status.HTTP_404_NOT_FOUND,
+        )
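On the worker side this endpoint is typically polled in a loop (the Agent's API helper wraps this; the sketch below is a bare-bones illustration with an assumed route prefix):

import time

import requests

API = "http://localhost:8000"                     # assumed host
HEADERS = {"Authorization": "Token <api-token>"}  # placeholder token

while True:
    # "/queue_task/task/speech2text/" is an assumed route built from url_path above.
    r = requests.get(f"{API}/queue_task/task/speech2text/", headers=HEADERS)
    if r.status_code == 200:
        task = r.json()  # serialized Task, already marked as "started" by the API
        ...              # run the model, then POST the result back via update_result
    time.sleep(1.5)      # simple polling interval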
update_result(request, pk=None)

Endpoint to update the result of a task.

Source code in API/orchestrator/views.py, lines 119-158:
@swagger_auto_schema(
+    operation_summary="Worker: Result Update",
+    operation_description="Update the task result",
+    request_body=TaskSerializer,
+    responses={200: "Task result updated successfully"},
+)
+@action(
+    detail=True,
+    methods=["post"],
+    permission_classes=[IsAuthenticated],
+    url_path="update_result",
+    url_name="update_result",
+)
+def update_result(self, request, pk=None):
+    """
+    Endpoint to update the result of a task.
+    """
+    try:
+        data = request.data
+        task = Task.objects.filter(id=pk).first()
+        if task is None:
+            return Response(
+                {"error": f"Task with ID {pk} does not exist"},
+                status=status.HTTP_404_NOT_FOUND,
+            )
+
+        serializer = TaskSerializer(data=data, instance=task, partial=True)
+        serializer.is_valid(raise_exception=True)
+        serializer.save()
+        return Response(
+            {"message": f"Task {task.id} updated successfully"},
+            status=status.HTTP_200_OK,
+        )
+    except Exception as e:
+        logger.error(f"Error updating task result: {e}")
+        logger.exception(e)
+        return Response(
+            {"error": f"Error updating task result: {e}"},
+            status=status.HTTP_400_BAD_REQUEST,
+        )
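After processing, the worker posts the result back as a partial update. A sketch with an assumed route and an illustrative payload shape (the task id 42 is made up):

import requests

API = "http://localhost:8000"                     # assumed host
HEADERS = {"Authorization": "Token <api-token>"}  # placeholder token

result_payload = {
    "result_status": "completed",
    "result_json": {"result_profile": {"text": "hello"}, "latency_profile": {}},
}
# "/queue_task/42/update_result/" is an assumed route derived from url_path above.
requests.post(f"{API}/queue_task/42/update_result/", json=result_payload, headers=HEADERS)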
worker(request)

Endpoint to register a GPU worker.

Source code in API/orchestrator/views.py, lines 160-196:
@swagger_auto_schema(
+    operation_summary="Worker: Register",
+    operation_description="Register a worker",
+    responses={200: "Worker registered or updated successfully"},
+    request_body=TaskWorkerSerializer,
+)
+@action(detail=False, methods=["post"], permission_classes=[IsAuthenticated])
+def worker(self, request):
+    """
+    Endpoint to register a GPU worker.
+    """
+    data = request.data
+    serializer = TaskWorkerSerializer(data=data)
+    serializer.is_valid(raise_exception=True)
+
+    uuid = data.get("uuid")
+    mac_address = data.get("mac_address")
+    ip_address = data.get("ip_address")
+    task_name = data.get("task_name")
+
+    worker, created = TaskWorker.objects.get_or_create(
+        uuid=uuid,
+        defaults={
+            "mac_address": mac_address,
+            "ip_address": ip_address,
+            "task_name": task_name,
+        },
+    )
+    if not created:
+        worker.mac_address = mac_address
+        worker.ip_address = ip_address
+        worker.save()
+
+    return Response(
+        {"message": f"Worker {uuid} registered or updated successfully"},
+        status=status.HTTP_200_OK,
+    )
\ No newline at end of file
diff --git a/Sources/Agent/main/index.html b/Sources/Agent/main/index.html
new file mode 100644
index 00000000..5c09a1fb
--- /dev/null
+++ b/Sources/Agent/main/index.html
@@ -0,0 +1,5980 @@

main

AIOrchestrator

This is the AI Orchestrator

We will pull the task from the API end
And then based on which type of the task it is, we will send it to the respective handler

Source code in Agent/main.py, lines 30-267:
class AIOrchestrator:
+    """
+    This is the AI Orchestrator
+
+    We will pull the task from the API end
+    And then based on which type of the task it is, we will send it to the respective handler
+    """
+
+    def __init__(
+        self,
+        api_domain: str,
+        token: str,
+        task_name: Optional[str] = "all",
+        time_sleep: Optional[float] = 1.5,
+    ):
+        """
+        Initialize the AI Orchestrator
+        Args:
+            api_domain (str): The API Domain
+            token (str): The API Token
+            task_name (str): The task name. Default is "all"
+            time_sleep (float): The time to sleep. Default is 1.5 during each loop
+        """
+        self.uuid = str(uuid.uuid4())
+        self.api_domain = api_domain
+        self.token = token
+        self.task_name = task_name
+        self.api = API(
+            domain=api_domain, token=token, task_name=task_name, uuid=self.uuid
+        )
+        self.api.register_or_update_worker()
+        self.storage_solution = self.api.get_storage_solution()
+        # controller
+        self.counter = 0
+        self.time_sleep = time_sleep
+
+        # first check the authentication of the token valid or not
+        if not self.authenticate_token():
+            raise Exception("Token is not valid")
+
+        if not self.pre_env_check():
+            raise Exception("Pre Environment Check Failed")
+
+        self.speech2text = None
+        self.text2speech = None
+        self.emotion_detection = None
+        self.quantization_llm = None
+        self.hf_llm = None
+        self.general_ml = None
+        self.openai_handler = None
+        self.rag_handler = None
+
+        self.task_name_router = {
+            TaskName.speech2text.value: self.handle_speech2text_task,
+            TaskName.text2speech.value: self.handle_text2speech_task,
+            TaskName.emotion_detection.value: self.handle_emotion_detection_task,
+            TaskName.quantization_llm.value: self.handle_quantization_llm_task,
+            TaskName.hf_llm.value: self.handle_hf_llm_task,
+            TaskName.general_ml.value: self.handle_general_ml_task,
+            TaskName.openai_gpt4o.value: self.handle_openai_task,
+            TaskName.openai_speech2text.value: self.handle_openai_task,
+            TaskName.openai_text2speech.value: self.handle_openai_task,
+            TaskName.openai_gpt4o_text_only.value: self.handle_openai_task,
+            TaskName.openai_gpt_4o_text_and_image.value: self.handle_openai_task,
+            TaskName.rag.value: self.handle_rag_task,
+        }
+
+    def authenticate_token(self):
+        """
+        Authenticate the token
+        Returns:
+            bool: True if the token is valid
+        """
+        return self.api.verify_token()
+
+    def pre_env_check(self):
+        # if task is text 2 speech, check openai key
+        load_dotenv()
+        if self.task_name in ["all", "text2speech"]:
+            # check openai key
+            openai_key = os.getenv("OPENAI_API_KEY")
+            if openai_key is None:
+                # READ from .env, and set it
+                # if it does not exist, then return False
+                logger.error("OpenAI API Key is not set")
+                return False
+        if self.task_name in ["all", "hf_llm"]:
+            # check Hugging Face token
+            hf_token = os.getenv("HF_TOKEN")
+            if hf_token is None:
+                logger.error("HF_TOKEN is not set")
+                return False
+        return True
+
+    def run(self):
+        logger.info(f"AI Worker Running UUID: {self.uuid}")
+        while True:
+            self.counter += 1
+            if self.counter % 50 == 0:
+                # report to the cloud that we are still alive
+                logger.info(f"Still alive. Counter: {self.counter}")
+                self.api.register_or_update_worker()
+            try:
+                with timer(logger=logger, message="get_task"):
+                    task = self.api.get_task()
+                # after get the task, then feed it to the model to evaluate the model params
+                if task is None:
+                    logger.info("No task found")
+                    time.sleep(self.time_sleep)
+                    continue
+                self.handle_task(task)
+            # allow it accepts keyboard interrupt
+            except KeyboardInterrupt:
+                logger.info("Keyboard Interrupt")
+                break
+            except Exception as e:
+                logger.exception(e)
+            time.sleep(self.time_sleep)
+
+    def handle_task(self, task: dict):
+        """
+        Handle the task
+        Args:
+            task (dict): The task
+        """
+        task_obj = Task(**task)
+        TimeLogger.log_task(task_obj, "start_task")
+        if task_obj.task_name in self.task_name_router:
+            task_obj = self.task_name_router[task_obj.task_name](task_obj)
+        elif "openai" in task_obj.task_name:
+            task_obj = self.handle_openai_task(task_obj)
+        else:
+            logger.error(f"Unknown task type: {task_obj.task_name}")
+            task_obj.result_status = ResultStatus.failed.value
+            task_obj.description = f"Unknown task type: {task_obj.task_name}"
+        TimeLogger.log_task(task_obj, "end_task")
+        # then update the task status
+        self.api.post_task_result(task_obj)
+
+    def handle_speech2text_task(self, task: Task):
+        """
+        Handle the speech2text task
+        Args:
+            task (Task): The task
+        """
+        if self.speech2text is None:
+            self.speech2text = Speech2Text()
+        task = self.speech2text.handle_task(task)
+        return task
+
+    def handle_text2speech_task(self, task: Task):
+        """
+        Handle the text2speech task
+        Args:
+            task (Task): The task
+        """
+        if self.text2speech is None:
+            self.text2speech = Text2Speech()
+        task = self.text2speech.handle_task(task)
+        return task
+
+    def handle_emotion_detection_task(self, task: Task):
+        """
+        Handle the emotion detection task
+        Args:
+            task (Task): The task
+        """
+        if self.emotion_detection is None:
+            self.emotion_detection = EmotionDetectionHandler()
+        task = self.emotion_detection.handle_task(task)
+        return task
+
+    def handle_quantization_llm_task(self, task: Task):
+        """
+        Handle the quantization llm task
+        Args:
+            task (Task): The task
+        """
+        if self.quantization_llm is None:
+            self.quantization_llm = QuantizationLLM(api=self.api)
+        task = self.quantization_llm.handle_task(task)
+        return task
+
+    def handle_hf_llm_task(self, task: Task):
+        """
+        Handle the hf llm task, which will require more time compared to other tasks
+        Args:
+            task (Task): The task
+
+        Returns:
+
+        """
+        if self.hf_llm is None:
+            self.hf_llm = HFLLM()
+        task = self.hf_llm.handle_task(task)
+        return task
+
+    def handle_general_ml_task(self, task: Task):
+        """
+        Handle the general ml task
+        Args:
+            task (Task): The task
+
+        Returns:
+
+        """
+        if self.general_ml is None:
+            self.general_ml = GeneralMLModel()
+        task = self.general_ml.handle_task(task)
+        return task
+
+    def handle_openai_task(self, task: Task):
+        """
+        Handle the openai task
+        Args:
+            task (Task): The task
+
+        Returns:
+
+        """
+        if self.openai_handler is None:
+            self.openai_handler = OpenAIHandler()
+        task = self.openai_handler.handle_task(task)
+        return task
+
+    def handle_rag_task(self, task: Task):
+        """
+        Handle the rag task
+        Args:
+            task (Task): The task
+
+        Returns:
+
+        """
+        if self.rag_handler is None:
+            self.rag_handler = RAGHandler()
+        task = self.rag_handler.handle_task(task)
+        return task
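A minimal launch sketch under stated assumptions: the domain and token are placeholders, and the real Agent/main.py may wire these values up through a CLI parser instead:

# Hypothetical driver code.
orchestrator = AIOrchestrator(
    api_domain="http://localhost:8000",  # assumed API host
    token="<api-token>",                 # placeholder token
    task_name="all",                     # this worker accepts every task type
    time_sleep=1.5,
)
orchestrator.run()  # blocks: poll for a task, dispatch to the matching handler, post the result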
__init__(api_domain, token, task_name='all', time_sleep=1.5)

Initialize the AI Orchestrator

Args:
    api_domain (str): The API Domain
    token (str): The API Token
    task_name (str): The task name. Default is "all"
    time_sleep (float): The time to sleep. Default is 1.5 during each loop

Source code in Agent/main.py, lines 38-95:
def __init__(
+    self,
+    api_domain: str,
+    token: str,
+    task_name: Optional[str] = "all",
+    time_sleep: Optional[float] = 1.5,
+):
+    """
+    Initialize the AI Orchestrator
+    Args:
+        api_domain (str): The API Domain
+        token (str): The API Token
+        task_name (str): The task name. Default is "all"
+        time_sleep (float): The time to sleep. Default is 1.5 during each loop
+    """
+    self.uuid = str(uuid.uuid4())
+    self.api_domain = api_domain
+    self.token = token
+    self.task_name = task_name
+    self.api = API(
+        domain=api_domain, token=token, task_name=task_name, uuid=self.uuid
+    )
+    self.api.register_or_update_worker()
+    self.storage_solution = self.api.get_storage_solution()
+    # controller
+    self.counter = 0
+    self.time_sleep = time_sleep
+
+    # first check the authentication of the token valid or not
+    if not self.authenticate_token():
+        raise Exception("Token is not valid")
+
+    if not self.pre_env_check():
+        raise Exception("Pre Environment Check Failed")
+
+    self.speech2text = None
+    self.text2speech = None
+    self.emotion_detection = None
+    self.quantization_llm = None
+    self.hf_llm = None
+    self.general_ml = None
+    self.openai_handler = None
+    self.rag_handler = None
+
+    self.task_name_router = {
+        TaskName.speech2text.value: self.handle_speech2text_task,
+        TaskName.text2speech.value: self.handle_text2speech_task,
+        TaskName.emotion_detection.value: self.handle_emotion_detection_task,
+        TaskName.quantization_llm.value: self.handle_quantization_llm_task,
+        TaskName.hf_llm.value: self.handle_hf_llm_task,
+        TaskName.general_ml.value: self.handle_general_ml_task,
+        TaskName.openai_gpt4o.value: self.handle_openai_task,
+        TaskName.openai_speech2text.value: self.handle_openai_task,
+        TaskName.openai_text2speech.value: self.handle_openai_task,
+        TaskName.openai_gpt4o_text_only.value: self.handle_openai_task,
+        TaskName.openai_gpt_4o_text_and_image.value: self.handle_openai_task,
+        TaskName.rag.value: self.handle_rag_task,
+    }
authenticate_token()

Authenticate the token

Returns:
    bool: True if the token is valid

Source code in Agent/main.py, lines 97-103:
def authenticate_token(self):
+    """
+    Authenticate the token
+    Returns:
+        bool: True if the token is valid
+    """
+    return self.api.verify_token()
handle_emotion_detection_task(task)

Handle the emotion detection task

Args:
    task (Task): The task

Source code in Agent/main.py, lines 191-200:
def handle_emotion_detection_task(self, task: Task):
+    """
+    Handle the emotion detection task
+    Args:
+        task (Task): The task
+    """
+    if self.emotion_detection is None:
+        self.emotion_detection = EmotionDetectionHandler()
+    task = self.emotion_detection.handle_task(task)
+    return task
handle_general_ml_task(task)

Handle the general ml task

Args:
    task (Task): The task

Returns:

Source code in Agent/main.py, lines 227-239:
def handle_general_ml_task(self, task: Task):
+    """
+    Handle the general ml task
+    Args:
+        task (Task): The task
+
+    Returns:
+
+    """
+    if self.general_ml is None:
+        self.general_ml = GeneralMLModel()
+    task = self.general_ml.handle_task(task)
+    return task
handle_hf_llm_task(task)

Handle the hf llm task, which will require more time compared to other tasks

Args:
    task (Task): The task

Returns:

Source code in Agent/main.py, lines 213-225:
def handle_hf_llm_task(self, task: Task):
+    """
+    Handle the hf llm task, which will require more time compared to other tasks
+    Args:
+        task (Task): The task
+
+    Returns:
+
+    """
+    if self.hf_llm is None:
+        self.hf_llm = HFLLM()
+    task = self.hf_llm.handle_task(task)
+    return task
handle_openai_task(task)

Handle the openai task

Args:
    task (Task): The task

Returns:

Source code in Agent/main.py, lines 241-253:
def handle_openai_task(self, task: Task):
+    """
+    Handle the openai task
+    Args:
+        task (Task): The task
+
+    Returns:
+
+    """
+    if self.openai_handler is None:
+        self.openai_handler = OpenAIHandler()
+    task = self.openai_handler.handle_task(task)
+    return task
handle_quantization_llm_task(task)

Handle the quantization llm task

Args:
    task (Task): The task

Source code in Agent/main.py, lines 202-211:
def handle_quantization_llm_task(self, task: Task):
+    """
+    Handle the quantization llm task
+    Args:
+        task (Task): The task
+    """
+    if self.quantization_llm is None:
+        self.quantization_llm = QuantizationLLM(api=self.api)
+    task = self.quantization_llm.handle_task(task)
+    return task
handle_rag_task(task)

Handle the rag task

Args:
    task (Task): The task

Returns:

Source code in Agent/main.py, lines 255-267:
def handle_rag_task(self, task: Task):
+    """
+    Handle the rag task
+    Args:
+        task (Task): The task
+
+    Returns:
+
+    """
+    if self.rag_handler is None:
+        self.rag_handler = RAGHandler()
+    task = self.rag_handler.handle_task(task)
+    return task
handle_speech2text_task(task)

Handle the speech2text task

Args:
    task (Task): The task

Source code in Agent/main.py, lines 169-178:
def handle_speech2text_task(self, task: Task):
+    """
+    Handle the speech2text task
+    Args:
+        task (Task): The task
+    """
+    if self.speech2text is None:
+        self.speech2text = Speech2Text()
+    task = self.speech2text.handle_task(task)
+    return task
handle_task(task)

Handle the task

Args:
    task (dict): The task

Source code in Agent/main.py, lines 149-167:
def handle_task(self, task: dict):
+    """
+    Handle the task
+    Args:
+        task (dict): The task
+    """
+    task_obj = Task(**task)
+    TimeLogger.log_task(task_obj, "start_task")
+    if task_obj.task_name in self.task_name_router:
+        task_obj = self.task_name_router[task_obj.task_name](task_obj)
+    elif "openai" in task_obj.task_name:
+        task_obj = self.handle_openai_task(task_obj)
+    else:
+        logger.error(f"Unknown task type: {task_obj.task_name}")
+        task_obj.result_status = ResultStatus.failed.value
+        task_obj.description = f"Unknown task type: {task_obj.task_name}"
+    TimeLogger.log_task(task_obj, "end_task")
+    # then update the task status
+    self.api.post_task_result(task_obj)
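The dispatch is a plain dictionary lookup, so supporting a new task type is a matter of writing a handler with a handle_task(task) method and adding a router entry. A sketch where the task name, handler class, and import path are all hypothetical:

from models.task import Task  # import path mirrors the Agent package layout (assumption)


class MyNewHandler:
    """Hypothetical handler used only to illustrate the dispatch contract."""

    def handle_task(self, task: Task) -> Task:
        # do the actual work here, then report status and results back on the task object
        task.result_status = "completed"
        task.result_json.result_profile.update({"echo": task.parameters})
        return task


# Inside AIOrchestrator.__init__ you would then register it (sketch):
#   self.my_new_handler = MyNewHandler()
#   self.task_name_router["my_new_task"] = self.my_new_handler.handle_task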
handle_text2speech_task(task)

Handle the text2speech task

Args:
    task (Task): The task

Source code in Agent/main.py, lines 180-189:
def handle_text2speech_task(self, task: Task):
+    """
+    Handle the text2speech task
+    Args:
+        task (Task): The task
+    """
+    if self.text2speech is None:
+        self.text2speech = Text2Speech()
+    task = self.text2speech.handle_task(task)
+    return task
\ No newline at end of file
diff --git a/Sources/Agent/models/parameters/index.html b/Sources/Agent/models/parameters/index.html
new file mode 100644
index 00000000..62ca9902
--- /dev/null
+++ b/Sources/Agent/models/parameters/index.html
@@ -0,0 +1,4635 @@

Parameters

\ No newline at end of file
diff --git a/Sources/Agent/models/results/index.html b/Sources/Agent/models/results/index.html
new file mode 100644
index 00000000..941edb05
--- /dev/null
+++ b/Sources/Agent/models/results/index.html
@@ -0,0 +1,4635 @@

Results

\ No newline at end of file
diff --git a/Sources/Agent/models/task/index.html b/Sources/Agent/models/task/index.html
new file mode 100644
index 00000000..a8c48ac7
--- /dev/null
+++ b/Sources/Agent/models/task/index.html
@@ -0,0 +1,4746 @@

Task

Task

Bases: BaseModel

The Task Model
This is the one we will pull and ask for the task from the API

Source code in Agent/models/task.py, lines 38-62:
class Task(BaseModel):
+    """
+    The Task Model
+    This is the one we will pull and ask for the task from the API
+    """
+
+    id: int = Field(description="The ID of the task")
+    name: str = Field(description="A unique name to track the cluster of tasks")
+    user_id: Optional[int] = Field(
+        None, description="The ID of the user who created the task"
+    )
+    task_name: TaskName = Field(description="The name of the task")
+    parameters: dict = Field(
+        default_factory=dict, description="The parameters for the task"
+    )
+    result_status: ResultStatus = Field(
+        ResultStatus.pending, description="The status of the task"
+    )
+    result_json: TaskResultJSON = Field(
+        default_factory=lambda: TaskResultJSON(result_profile={}, latency_profile={}),
+        description="The result of the task",
+    )
+    description: Optional[str] = Field(
+        None, description="The description of the task result"
+    )
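Since this is a Pydantic model, the JSON payload returned by the API's task endpoint can be validated directly into it, which is what Agent/main.py does with Task(**task). A small sketch with an invented payload:

# Illustrative payload, shaped like the orchestrator's serialized Task;
# "Task" here is the Pydantic model defined above.
payload = {
    "id": 42,
    "name": "demo-conversation",
    "user_id": None,
    "task_name": "speech2text",
    "parameters": {"uid": "session-001", "audio_index": "0-10"},
    "result_status": "pending",
    "result_json": {"result_profile": {}, "latency_profile": {}},
    "description": None,
}
task = Task(**payload)
print(task.task_name)  # -> TaskName.speech2text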
\ No newline at end of file
diff --git a/Sources/Agent/models/track_type/index.html b/Sources/Agent/models/track_type/index.html
new file mode 100644
index 00000000..6dc718d1
--- /dev/null
+++ b/Sources/Agent/models/track_type/index.html
@@ -0,0 +1,4635 @@

TrackType

\ No newline at end of file
diff --git a/Sources/Agent/modules/emotion_detection/features_extraction/index.html b/Sources/Agent/modules/emotion_detection/features_extraction/index.html
new file mode 100644
index 00000000..dc315a2c
--- /dev/null
+++ b/Sources/Agent/modules/emotion_detection/features_extraction/index.html
@@ -0,0 +1,5321 @@

FeaturesExtraction

FeaturesExtractor

Source code in Agent/modules/emotion_detection/features_extraction.py, lines 20-263:
class FeaturesExtractor:
+    def __init__(self) -> None:
+        self.padding_mode = "zeros"
+        self.padding_location = "back"
+
+    @staticmethod
+    def get_audio_embedding(audios: List[str]) -> torch.Tensor:
+        """Extracts and returns average audio features from a list of audio files."""
+        features = []
+        for audio_path in audios:
+            y, sr = librosa.load(audio_path)
+            hop_length = 512
+            f0 = librosa.feature.zero_crossing_rate(y, hop_length=hop_length).T
+            mfcc = librosa.feature.mfcc(y=y, sr=sr, hop_length=hop_length, htk=True).T
+            cqt = librosa.feature.chroma_cqt(y=y, sr=sr, hop_length=hop_length).T
+            temp_feature = np.concatenate([f0, mfcc, cqt], axis=-1)
+            features.append(temp_feature)
+        feature = np.mean(np.concatenate(features), axis=0).reshape(1, -1)
+        # get them into tensor
+        feature = torch.tensor(feature).float()
+        return feature
+
+    def get_images_tensor(self, images: List[np.ndarray]) -> torch.Tensor:
+        """Extracts features from a list of images using a specified model."""
+        model_name = "OpenFace"
+        image_features = [
+            self.represent(image, model_name=model_name)[0]["embedding"]
+            for image in images
+        ]
+        return torch.tensor(image_features)
+
+    def represent(
+        self,
+        img,
+        model_name: str = "VGG-Face",
+        enforce_detection: bool = False,
+        detector_backend: str = "opencv",
+        align: bool = True,
+        expand_percentage: int = 0,
+        normalization: str = "base",
+    ) -> List[Dict[str, Any]]:
+        resp_objs = []
+
+        model: FacialRecognition = modeling.build_model(model_name)
+
+        # ---------------------------------
+        # we have run pre-process in verification. so, this can be skipped if it is coming from verifying.
+        target_size = model.input_shape
+        if detector_backend != "skip":
+            img_objs = self.extract_faces(
+                img,
+                target_size=(target_size[1], target_size[0]),
+                detector_backend=detector_backend,
+                grayscale=False,
+                enforce_detection=enforce_detection,
+                align=align,
+                expand_percentage=expand_percentage,
+            )
+        else:  # skip
+            # --------------------------------
+            if len(img.shape) == 4:
+                img = img[0]  # e.g. (1, 224, 224, 3) to (224, 224, 3)
+            if len(img.shape) == 3:
+                img = cv2.resize(img, target_size)
+                img = np.expand_dims(img, axis=0)
+                # When called from verifying, this is already normalized. But needed when user given.
+                if img.max() > 1:
+                    img = (img.astype(np.float32) / 255.0).astype(np.float32)
+            # --------------------------------
+            # make dummy region and confidence to keep compatibility with `extract_faces`
+            img_objs = [
+                {
+                    "face": img,
+                    "facial_area": {
+                        "x": 0,
+                        "y": 0,
+                        "w": img.shape[1],
+                        "h": img.shape[2],
+                    },
+                    "confidence": 0,
+                }
+            ]
+        # ---------------------------------
+
+        for img_obj in img_objs:
+            img = img_obj["face"]
+            region = img_obj["facial_area"]
+            confidence = img_obj["confidence"]
+            # custom normalization
+            img = preprocessing.normalize_input(img=img, normalization=normalization)
+
+            embedding = model.find_embeddings(img)
+
+            resp_obj = {
+                "embedding": embedding,
+                "facial_area": region,
+                "face_confidence": confidence,
+            }
+            resp_objs.append(resp_obj)
+
+        return resp_objs
+
+    logger = Logger(module="deepface/modules/detection.py")
+
+    @staticmethod
+    def extract_faces(
+        img,
+        target_size: Optional[Tuple[int, int]] = (224, 224),
+        detector_backend: str = "opencv",
+        enforce_detection: bool = True,
+        align: bool = False,
+        expand_percentage: int = 0.2,
+        grayscale: bool = False,
+        human_readable=False,
+    ) -> List[Dict[str, Any]]:
+        resp_objs = []
+        base_region = FacialAreaRegion(
+            x=0, y=0, w=img.shape[1], h=img.shape[0], confidence=0
+        )
+
+        if detector_backend == "skip":
+            face_objs = [DetectedFace(img=img, facial_area=base_region, confidence=0)]
+        else:
+            face_objs = DetectorWrapper.detect_faces(
+                detector_backend=detector_backend,
+                img=img,
+                align=align,
+                expand_percentage=expand_percentage,
+            )
+        # logger.info(f"Detected {len(face_objs)} faces.")
+        # in case of no face found
+        if len(face_objs) == 0 and enforce_detection is True:
+            raise ValueError(
+                "Face could not be detected. Please confirm that the picture is a face photo "
+                "or consider to set enforce_detection param to False."
+            )
+
+        if len(face_objs) == 0 and enforce_detection is False:
+            face_objs = [DetectedFace(img=img, facial_area=base_region, confidence=0)]
+
+        for face_obj in face_objs:
+            current_img = face_obj.img
+            current_region = face_obj.facial_area
+
+            if current_img.shape[0] == 0 or current_img.shape[1] == 0:
+                continue
+
+            if grayscale is True:
+                current_img = cv2.cvtColor(current_img, cv2.COLOR_BGR2GRAY)
+
+            # resize and padding
+            if target_size is not None:
+                factor_0 = target_size[0] / current_img.shape[0]
+                factor_1 = target_size[1] / current_img.shape[1]
+                factor = min(factor_0, factor_1)
+
+                dsize = (
+                    int(current_img.shape[1] * factor),
+                    int(current_img.shape[0] * factor),
+                )
+                current_img = cv2.resize(current_img, dsize)
+
+                diff_0 = target_size[0] - current_img.shape[0]
+                diff_1 = target_size[1] - current_img.shape[1]
+                if grayscale is False:
+                    # Put the base image in the middle of the padded image
+                    current_img = np.pad(
+                        current_img,
+                        (
+                            (diff_0 // 2, diff_0 - diff_0 // 2),
+                            (diff_1 // 2, diff_1 - diff_1 // 2),
+                            (0, 0),
+                        ),
+                        "constant",
+                    )
+                else:
+                    current_img = np.pad(
+                        current_img,
+                        (
+                            (diff_0 // 2, diff_0 - diff_0 // 2),
+                            (diff_1 // 2, diff_1 - diff_1 // 2),
+                        ),
+                        "constant",
+                    )
+
+                # double check: if target image is not still the same size with target.
+                if current_img.shape[0:2] != target_size:
+                    current_img = cv2.resize(current_img, target_size)
+
+            # normalizing the image pixels
+            # what this line doing? must?
+            img_pixels = image.img_to_array(current_img)
+            img_pixels = np.expand_dims(img_pixels, axis=0)
+            img_pixels /= 255  # normalize input in [0, 1]
+            # discard expanded dimension
+            if human_readable is True and len(img_pixels.shape) == 4:
+                img_pixels = img_pixels[0]
+
+            resp_objs.append(
+                {
+                    "face": (
+                        img_pixels[:, :, ::-1] if human_readable is True else img_pixels
+                    ),
+                    "facial_area": {
+                        "x": int(current_region.x),
+                        "y": int(current_region.y),
+                        "w": int(current_region.w),
+                        "h": int(current_region.h),
+                        "left_eye": current_region.left_eye,
+                        "right_eye": current_region.right_eye,
+                    },
+                    "confidence": round(current_region.confidence, 2),
+                }
+            )
+
+        if len(resp_objs) == 0 and enforce_detection is True:
+            raise ValueError(
+                "Exception while extracting faces from ...."
+                "Consider to set enforce_detection arg to False."
+            )
+
+        return resp_objs
+
+    @staticmethod
+    def align_face(
+        img: np.ndarray,
+        left_eye: Union[list, tuple],
+        right_eye: Union[list, tuple],
+    ) -> Tuple[np.ndarray, float]:
+        # if eye could not be detected for the given image, return the image itself
+        if left_eye is None or right_eye is None:
+            return img, 0
+
+        # sometimes unexpectedly detected images come with nil dimensions
+        if img.shape[0] == 0 or img.shape[1] == 0:
+            return img, 0
+
+        angle = float(
+            np.degrees(
+                np.arctan2(right_eye[1] - left_eye[1], right_eye[0] - left_eye[0])
+            )
+        )
+        img = np.array(Image.fromarray(img).rotate(angle))
+        return img, angle
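A sketch of how the extractor is typically driven. The file paths are illustrative; in the emotion detection handler shown further below, the audio paths and frames come from the client data folder and are read with OpenCV in the same way:

import cv2

# Hypothetical local files, for illustration only.
audio_files = ["data/audio/session-001.wav"]
frames = [cv2.imread(p) for p in ["data/frames/f1.png", "data/frames/f2.png"]]

extractor = FeaturesExtractor()
audio_feature = extractor.get_audio_embedding(audio_files)  # averaged audio features, shape (1, feature_dim)
video_feature = extractor.get_images_tensor(frames)         # one facial embedding per frame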
get_audio_embedding(audios)  (staticmethod)

Extracts and returns average audio features from a list of audio files.

Source code in Agent/modules/emotion_detection/features_extraction.py, lines 25-40:
@staticmethod
+def get_audio_embedding(audios: List[str]) -> torch.Tensor:
+    """Extracts and returns average audio features from a list of audio files."""
+    features = []
+    for audio_path in audios:
+        y, sr = librosa.load(audio_path)
+        hop_length = 512
+        f0 = librosa.feature.zero_crossing_rate(y, hop_length=hop_length).T
+        mfcc = librosa.feature.mfcc(y=y, sr=sr, hop_length=hop_length, htk=True).T
+        cqt = librosa.feature.chroma_cqt(y=y, sr=sr, hop_length=hop_length).T
+        temp_feature = np.concatenate([f0, mfcc, cqt], axis=-1)
+        features.append(temp_feature)
+    feature = np.mean(np.concatenate(features), axis=0).reshape(1, -1)
+    # get them into tensor
+    feature = torch.tensor(feature).float()
+    return feature
get_images_tensor(images)

Extracts features from a list of images using a specified model.

Source code in Agent/modules/emotion_detection/features_extraction.py, lines 42-49:
def get_images_tensor(self, images: List[np.ndarray]) -> torch.Tensor:
+    """Extracts features from a list of images using a specified model."""
+    model_name = "OpenFace"
+    image_features = [
+        self.represent(image, model_name=model_name)[0]["embedding"]
+        for image in images
+    ]
+    return torch.tensor(image_features)
\ No newline at end of file
diff --git a/Sources/Agent/modules/emotion_detection/handler/index.html b/Sources/Agent/modules/emotion_detection/handler/index.html
new file mode 100644
index 00000000..1c87b6b1
--- /dev/null
+++ b/Sources/Agent/modules/emotion_detection/handler/index.html
@@ -0,0 +1,5360 @@

Handler

EmotionDetectionHandler

Source code in Agent/modules/emotion_detection/handler.py, lines 28-157:
class EmotionDetectionHandler:
+
+    def handle_task(self, task: Task) -> Optional[Task]:
+        """
+        Handle the task
+        Args:
+            task (Task): The task to handle
+        Returns:
+            The task with the result
+        """
+        emotion_detection_parameters = EmotionDetectionParameters(**task.parameters)
+        text = emotion_detection_parameters.text
+        audio_file = emotion_detection_parameters.audio_file
+        images_path_list = emotion_detection_parameters.images_path_list
+
+        logger.info(f"Text: {text}")
+        logger.info(f"Audio: {audio_file}")
+        logger.info(f"Images: {len(images_path_list)}")
+        TimeLogger.log_task(task, "start_trigger_emotion_model")
+        result_profile, latency_profile = self.trigger_model(
+            text, [audio_file], images_path_list
+        )
+        TimeLogger.log_task(task, "end_trigger_emotion_model")
+        task.result_status = ResultStatus.completed.value
+        task.result_json.result_profile.update(result_profile)
+        task.result_json.latency_profile.update(latency_profile)
+        return task
+
+    @staticmethod
+    def trigger_model(
+        text: str, audio_paths: List[str], images_paths: List[str]
+    ) -> Tuple[dict, dict]:
+        """
+
+        Args:
+            text (str): The text to analyze for emotion
+            audio_paths (List[str]): The audio data to analyze for emotion
+            images_paths (List[str]): The images data to analyze for emotion
+
+        Returns:
+
+        """
+        result_profile = {}
+        latency_profile = {}
+
+        if not text or not audio_paths or not images_paths:
+            logger.error("No text or audio or images provided")
+            logger.error(
+                f"text: {text is None}, audio: {audio_paths is None}, images: {images_paths is None}"
+            )
+            return {}, {}
+        # audio is the file path
+        # same as the images, we need to read the images first
+        audio = []
+        for audio_path in audio_paths:
+            audio.append((CLIENT_DATA_FOLDER / audio_path).as_posix())
+
+        start_time = datetime.now()
+        # read the images
+        images = []
+        for images_path in images_paths:
+            folder = CLIENT_DATA_FOLDER / images_path
+            if not folder.exists():
+                continue
+            # Time Killer
+            for image_file in folder.iterdir():
+                image = cv2.imread(image_file.as_posix())
+                images.append(image)
+        latency_profile["io_images_read"] = (
+            datetime.now() - start_time
+        ).total_seconds()
+
+        # 1. get the features with bert cn model
+        with time_tracker(
+            "feature_extraction", latency_profile, track_type=TrackType.MODEL.value
+        ):
+            features_extractor = FeaturesExtractor()
+            feature_video = (
+                features_extractor.get_images_tensor(images)
+                if images is not None
+                else None
+            )  # (n/5,709)
+            feature_audio = (
+                features_extractor.get_audio_embedding(audio)
+                if audio is not None
+                else None
+            )  # (94,33)
+
+        (
+            logger.info(f"feature_video: {feature_video.shape}")
+            if feature_video is not None
+            else logger.info("feature_video: there is no information about video")
+        )
+        (
+            logger.info(f"feature_audio: {feature_audio.shape}")
+            if feature_audio is not None
+            else logger.info("feature_audio: there is no information about audio")
+        )
+
+        # data is ready
+        device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
+        model = SentimentAnalysis().to(device)
+        # load the model
+        with time_tracker(
+            "load_model", latency_profile, track_type=TrackType.MODEL.value
+        ):
+            model.load_state_dict(
+                {
+                    k.replace("Model.", ""): v
+                    for k, v in torch.load(models_dir / "sa_sims.pth").items()
+                },
+                strict=True,
+            )
+
+            model.eval()
+
+        # run model
+        with time_tracker("infer", latency_profile, track_type=TrackType.MODEL.value):
+            output = model(text, feature_audio, feature_video)
+
+        logger.critical(f"output: {output}")
+        # loop the output dict, get all of them into float
+        for k, v in output.items():
+            output[k] = float(v)
+            # and get it to decimal 2
+            output[k] = round(output[k], 2)
+        multi_modal_output = output.get("M", 0)
+        result_profile["multi_modal_output"] = output
+        logger.critical(f"multi_modal_output: {multi_modal_output}")
+        return result_profile, latency_profile
+
+
+ + + +
+ + + + + + + + + +
+ + +

+ handle_task(task) + +

+ + +
+ +

Handle the task +Args: + task (Task): The task to handle +Returns: + The task with the result

+ +
+ Source code in Agent/modules/emotion_detection/handler.py +
+ (lines 30-54)
def handle_task(self, task: Task) -> Optional[Task]:
+    """
+    Handle the task
+    Args:
+        task (Task): The task to handle
+    Returns:
+        The task with the result
+    """
+    emotion_detection_parameters = EmotionDetectionParameters(**task.parameters)
+    text = emotion_detection_parameters.text
+    audio_file = emotion_detection_parameters.audio_file
+    images_path_list = emotion_detection_parameters.images_path_list
+
+    logger.info(f"Text: {text}")
+    logger.info(f"Audio: {audio_file}")
+    logger.info(f"Images: {len(images_path_list)}")
+    TimeLogger.log_task(task, "start_trigger_emotion_model")
+    result_profile, latency_profile = self.trigger_model(
+        text, [audio_file], images_path_list
+    )
+    TimeLogger.log_task(task, "end_trigger_emotion_model")
+    task.result_status = ResultStatus.completed.value
+    task.result_json.result_profile.update(result_profile)
+    task.result_json.latency_profile.update(latency_profile)
+    return task
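
For orientation, the shape of the parameters this handler consumes (field names taken from EmotionDetectionParameters above); the values are placeholders, and Task objects are normally produced by the API/orchestrator rather than built by hand.

example_parameters = {
    "text": "I am really happy with the result",
    "audio_file": "audio/sample_0.wav",       # resolved against CLIENT_DATA_FOLDER
    "images_path_list": ["frames/sample_0"],  # folders of extracted frames
}

# handler = EmotionDetectionHandler()
# completed = handler.handle_task(task)  # where task.parameters == example_parameters
# completed.result_json.result_profile["multi_modal_output"]  # per-modality scores, rounded to 2 dp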
+
+
+
+ +
+ +
+ + +

+ trigger_model(text, audio_paths, images_paths) + + + staticmethod + + +

+ + +
+ + + +

Parameters:

text (str, required): The text to analyze for emotion
audio_paths (List[str], required): The audio data to analyze for emotion
images_paths (List[str], required): The images data to analyze for emotion

Returns:

+ +
+ Source code in Agent/modules/emotion_detection/handler.py +
+ (lines 56-157)
@staticmethod
+def trigger_model(
+    text: str, audio_paths: List[str], images_paths: List[str]
+) -> Tuple[dict, dict]:
+    """
+
+    Args:
+        text (str): The text to analyze for emotion
+        audio_paths (List[str]): The audio data to analyze for emotion
+        images_paths (List[str]): The images data to analyze for emotion
+
+    Returns:
+
+    """
+    result_profile = {}
+    latency_profile = {}
+
+    if not text or not audio_paths or not images_paths:
+        logger.error("No text or audio or images provided")
+        logger.error(
+            f"text: {text is None}, audio: {audio_paths is None}, images: {images_paths is None}"
+        )
+        return {}, {}
+    # audio is the file path
+    # same as the images, we need to read the images first
+    audio = []
+    for audio_path in audio_paths:
+        audio.append((CLIENT_DATA_FOLDER / audio_path).as_posix())
+
+    start_time = datetime.now()
+    # read the images
+    images = []
+    for images_path in images_paths:
+        folder = CLIENT_DATA_FOLDER / images_path
+        if not folder.exists():
+            continue
+        # Time Killer
+        for image_file in folder.iterdir():
+            image = cv2.imread(image_file.as_posix())
+            images.append(image)
+    latency_profile["io_images_read"] = (
+        datetime.now() - start_time
+    ).total_seconds()
+
+    # 1. get the features with bert cn model
+    with time_tracker(
+        "feature_extraction", latency_profile, track_type=TrackType.MODEL.value
+    ):
+        features_extractor = FeaturesExtractor()
+        feature_video = (
+            features_extractor.get_images_tensor(images)
+            if images is not None
+            else None
+        )  # (n/5,709)
+        feature_audio = (
+            features_extractor.get_audio_embedding(audio)
+            if audio is not None
+            else None
+        )  # (94,33)
+
+    (
+        logger.info(f"feature_video: {feature_video.shape}")
+        if feature_video is not None
+        else logger.info("feature_video: there is no information about video")
+    )
+    (
+        logger.info(f"feature_audio: {feature_audio.shape}")
+        if feature_audio is not None
+        else logger.info("feature_audio: there is no information about audio")
+    )
+
+    # data is ready
+    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
+    model = SentimentAnalysis().to(device)
+    # load the model
+    with time_tracker(
+        "load_model", latency_profile, track_type=TrackType.MODEL.value
+    ):
+        model.load_state_dict(
+            {
+                k.replace("Model.", ""): v
+                for k, v in torch.load(models_dir / "sa_sims.pth").items()
+            },
+            strict=True,
+        )
+
+        model.eval()
+
+    # run model
+    with time_tracker("infer", latency_profile, track_type=TrackType.MODEL.value):
+        output = model(text, feature_audio, feature_video)
+
+    logger.critical(f"output: {output}")
+    # loop the output dict, get all of them into float
+    for k, v in output.items():
+        output[k] = float(v)
+        # and get it to decimal 2
+        output[k] = round(output[k], 2)
+    multi_modal_output = output.get("M", 0)
+    result_profile["multi_modal_output"] = output
+    logger.critical(f"multi_modal_output: {multi_modal_output}")
+    return result_profile, latency_profile
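
Because trigger_model is a staticmethod it can also be exercised directly with plain paths, all relative to CLIENT_DATA_FOLDER; the paths in this sketch are placeholders.

result_profile, latency_profile = EmotionDetectionHandler.trigger_model(
    text="I am really happy with the result",
    audio_paths=["audio/sample_0.wav"],   # audio files under CLIENT_DATA_FOLDER
    images_paths=["frames/sample_0"],     # frame folders under CLIENT_DATA_FOLDER
)
# Returns ({}, {}) if any of the three inputs is missing; otherwise
# result_profile["multi_modal_output"] holds the per-modality scores ("M" is the fused score).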
\ No newline at end of file
diff --git a/Sources/Agent/modules/emotion_detection/sentiment/index.html b/Sources/Agent/modules/emotion_detection/sentiment/index.html
new file mode 100644
index 00000000..6c354710
--- /dev/null
+++ b/Sources/Agent/modules/emotion_detection/sentiment/index.html
@@ -0,0 +1,5093 @@
+ Sentiment - OpenOmni Framework by AI4WA

Sentiment

+ +
+ + + + +
+ + + +
+ + + + + + + + +
+ + + +

+ BertTextEncoder + + +

+ + +
+

+ Bases: Module

+ + +
+ Source code in Agent/modules/emotion_detection/sentiment.py +
+ (lines 177-246)
class BertTextEncoder(nn.Module):
+    def __init__(self, language="en", use_finetune=False):
+        """
+        language: en / cn
+        """
+        super(BertTextEncoder, self).__init__()
+
+        assert language in ["en", "cn"]
+
+        tokenizer_class = BertTokenizer
+        model_class = BertModel
+        # directory is fine
+        # pretrained_weights = '/home/sharing/disk3/pretrained_embedding/Chinese/bert/pytorch'
+        if language == "en":
+            self.tokenizer = tokenizer_class.from_pretrained(
+                f"{models_dir}/bert_en", do_lower_case=True
+            )
+            self.model = model_class.from_pretrained(f"{models_dir}/bert_en")
+        elif language == "cn":
+            self.tokenizer = tokenizer_class.from_pretrained(f"{models_dir}/bert_cn")
+            self.model = model_class.from_pretrained(f"{models_dir}/bert_cn")
+
+        self.use_finetune = use_finetune
+
+    def get_tokenizer(self):
+        return self.tokenizer
+
+    def from_text(self, text):
+        """
+        text: raw data
+        """
+        input_ids = self.get_id(text)
+        with torch.no_grad():
+            last_hidden_states = self.model(input_ids)[
+                0
+            ]  # Models outputs are now tuples
+        return last_hidden_states.squeeze()
+
+    def forward(self, text):
+        """
+        text: (batch_size, 3, seq_len)
+        3: input_ids, input_mask, segment_ids
+        input_ids: input_ids,
+        input_mask: attention_mask,
+        segment_ids: token_type_ids
+        """
+        text = self.tokenizer(text)
+        input_ids, input_mask, segment_ids = (
+            torch.tensor(text["input_ids"]).long().unsqueeze(0),
+            torch.tensor(text["token_type_ids"]).unsqueeze(0).float(),
+            torch.tensor(text["attention_mask"]).unsqueeze(0).long(),
+        )
+        if self.use_finetune:
+            last_hidden_states = self.model(
+                input_ids=input_ids,
+                attention_mask=input_mask,
+                token_type_ids=segment_ids,
+            )[
+                0
+            ]  # Models outputs are now tuples
+        else:
+            with torch.no_grad():
+                last_hidden_states = self.model(
+                    input_ids=input_ids,
+                    attention_mask=input_mask,
+                    token_type_ids=segment_ids,
+                )[
+                    0
+                ]  # Models outputs are now tuples
+        return last_hidden_states
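
A small sketch of driving the encoder directly, assuming the bert_en weights referenced above are already present under models_dir; the input sentence is illustrative.

encoder = BertTextEncoder(language="en", use_finetune=False)
hidden = encoder("I am really happy with the result")  # forward() tokenizes internally
print(hidden.shape)  # (1, seq_len, hidden_size); hidden_size is 768 for a bert-base checkpoint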
+
+
+ + + +
+ + + + + + + + + +
+ + +

+ __init__(language='en', use_finetune=False) + +

+ + +
+ +

language: en / cn

+ +
+ Source code in Agent/modules/emotion_detection/sentiment.py +
+ (lines 178-199)
def __init__(self, language="en", use_finetune=False):
+    """
+    language: en / cn
+    """
+    super(BertTextEncoder, self).__init__()
+
+    assert language in ["en", "cn"]
+
+    tokenizer_class = BertTokenizer
+    model_class = BertModel
+    # directory is fine
+    # pretrained_weights = '/home/sharing/disk3/pretrained_embedding/Chinese/bert/pytorch'
+    if language == "en":
+        self.tokenizer = tokenizer_class.from_pretrained(
+            f"{models_dir}/bert_en", do_lower_case=True
+        )
+        self.model = model_class.from_pretrained(f"{models_dir}/bert_en")
+    elif language == "cn":
+        self.tokenizer = tokenizer_class.from_pretrained(f"{models_dir}/bert_cn")
+        self.model = model_class.from_pretrained(f"{models_dir}/bert_cn")
+
+    self.use_finetune = use_finetune
+
+
+
+ +
+ +
+ + +

+ forward(text) + +

+ + +
+ +

text: (batch_size, 3, seq_len) +3: input_ids, input_mask, segment_ids +input_ids: input_ids, +input_mask: attention_mask, +segment_ids: token_type_ids

+ +
+ Source code in Agent/modules/emotion_detection/sentiment.py +
+ (lines 215-246)
def forward(self, text):
+    """
+    text: (batch_size, 3, seq_len)
+    3: input_ids, input_mask, segment_ids
+    input_ids: input_ids,
+    input_mask: attention_mask,
+    segment_ids: token_type_ids
+    """
+    text = self.tokenizer(text)
+    input_ids, input_mask, segment_ids = (
+        torch.tensor(text["input_ids"]).long().unsqueeze(0),
+        torch.tensor(text["token_type_ids"]).unsqueeze(0).float(),
+        torch.tensor(text["attention_mask"]).unsqueeze(0).long(),
+    )
+    if self.use_finetune:
+        last_hidden_states = self.model(
+            input_ids=input_ids,
+            attention_mask=input_mask,
+            token_type_ids=segment_ids,
+        )[
+            0
+        ]  # Models outputs are now tuples
+    else:
+        with torch.no_grad():
+            last_hidden_states = self.model(
+                input_ids=input_ids,
+                attention_mask=input_mask,
+                token_type_ids=segment_ids,
+            )[
+                0
+            ]  # Models outputs are now tuples
+    return last_hidden_states
+
+
+
+ +
+ +
+ + +

+ from_text(text) + +

+ + +
+ +

text: raw data

+ +
+ Source code in Agent/modules/emotion_detection/sentiment.py +
+ (lines 204-213)
def from_text(self, text):
+    """
+    text: raw data
+    """
+    input_ids = self.get_id(text)
+    with torch.no_grad():
+        last_hidden_states = self.model(input_ids)[
+            0
+        ]  # Models outputs are now tuples
+    return last_hidden_states.squeeze()
\ No newline at end of file
diff --git a/Sources/Agent/modules/general_ml/handler/index.html b/Sources/Agent/modules/general_ml/handler/index.html
new file mode 100644
index 00000000..23103503
--- /dev/null
+++ b/Sources/Agent/modules/general_ml/handler/index.html
@@ -0,0 +1,5153 @@
+ Handler - OpenOmni Framework by AI4WA

Handler

+ +
+ + + + +
+ + + +
+ + + + + + + + +
+ + + +

+ GeneralMLModel + + +

+ + +
+ + +
+ Source code in Agent/modules/general_ml/handler.py +
+ (lines 14-89)
class GeneralMLModel:
+    def __init__(self):
+        self.avail_models = {}
+
+    def handle_task(self, task: Task) -> Task:
+        """
+        Handle the task
+        Args:
+            task (Task): The task to handle
+
+        Returns:
+            Updated task
+        """
+        TimeLogger.log_task(task, "start_general_ml")
+        result_profile = {}
+        latency_profile = {}
+        general_ml_parameters = GeneralMLParameters(**task.parameters)
+        text = general_ml_parameters.text
+        general_model_name = general_ml_parameters.general_model_name
+        params = general_ml_parameters.params
+        if general_model_name not in self.avail_models:
+            logger.error(f"Model {general_model_name} not loaded yet")
+            with time_tracker(
+                "init", latency_profile, track_type=TrackType.MODEL.value
+            ):
+                ml_model = self.load_model(general_model_name)
+                self.avail_models[general_model_name] = ml_model
+
+        else:
+            ml_model = self.avail_models[general_model_name]
+
+        with timer(logger, f"Model infer {general_model_name}"):
+            with time_tracker(
+                "infer", latency_profile, track_type=TrackType.MODEL.value
+            ):
+                res = self.infer(ml_model, general_model_name, text, params)
+        result_profile["result"] = res
+
+        task.result_status = ResultStatus.completed.value
+        task.result_json.result_profile.update(result_profile)
+        task.result_json.latency_profile.update(latency_profile)
+        TimeLogger.log_task(task, "end_general_ml")
+        return task
+
+    @staticmethod
+    def load_model(general_model_name: str):
+        """
+        Load model
+        Args:
+            general_model_name (str): Model name
+
+        Returns:
+
+        """
+        if general_model_name == "sentence_transformer":
+            return SentenceTransformer("all-MiniLM-L6-v2")
+        raise ValueError(f"Model {general_model_name} is not implemented")
+
+    @staticmethod
+    def infer(ml_model, general_model_name: str, text: str, params: dict):
+        """
+        Infer the model
+        Args:
+            ml_model: General model
+            general_model_name (str): Model name
+            text (str): Text
+            params (dict): Model params
+
+        Returns:
+
+        """
+        if general_model_name == "sentence_transformer":
+            result = ml_model.encode(text)
+            return result.tolist()
+        logger.info(params)
+        raise ValueError(f"Model {general_model_name} is not implemented")
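
Only sentence_transformer is wired in at the moment; a minimal end-to-end sketch using the two static helpers directly (the first call downloads all-MiniLM-L6-v2 if it is not already cached, and the input text is illustrative).

model = GeneralMLModel.load_model("sentence_transformer")
embedding = GeneralMLModel.infer(model, "sentence_transformer", "hello openomni", params={})
print(len(embedding))  # 384 floats for all-MiniLM-L6-v2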
+
+
+ + + +
+ + + + + + + + + +
+ + +

+ handle_task(task) + +

+ + +
+ +

Handle the task +Args: + task (Task): The task to handle

+ + +

Returns:

Task: Updated task
+ Source code in Agent/modules/general_ml/handler.py +
+ (lines 18-56)
def handle_task(self, task: Task) -> Task:
+    """
+    Handle the task
+    Args:
+        task (Task): The task to handle
+
+    Returns:
+        Updated task
+    """
+    TimeLogger.log_task(task, "start_general_ml")
+    result_profile = {}
+    latency_profile = {}
+    general_ml_parameters = GeneralMLParameters(**task.parameters)
+    text = general_ml_parameters.text
+    general_model_name = general_ml_parameters.general_model_name
+    params = general_ml_parameters.params
+    if general_model_name not in self.avail_models:
+        logger.error(f"Model {general_model_name} not loaded yet")
+        with time_tracker(
+            "init", latency_profile, track_type=TrackType.MODEL.value
+        ):
+            ml_model = self.load_model(general_model_name)
+            self.avail_models[general_model_name] = ml_model
+
+    else:
+        ml_model = self.avail_models[general_model_name]
+
+    with timer(logger, f"Model infer {general_model_name}"):
+        with time_tracker(
+            "infer", latency_profile, track_type=TrackType.MODEL.value
+        ):
+            res = self.infer(ml_model, general_model_name, text, params)
+    result_profile["result"] = res
+
+    task.result_status = ResultStatus.completed.value
+    task.result_json.result_profile.update(result_profile)
+    task.result_json.latency_profile.update(latency_profile)
+    TimeLogger.log_task(task, "end_general_ml")
+    return task
+
+
+
+ +
+ +
+ + +

+ infer(ml_model, general_model_name, text, params) + + + staticmethod + + +

+ + +
+ +

Infer the model +Args: + ml_model: General model + general_model_name (str): Model name + text (str): Text + params (dict): Model params

+

Returns:

+ +
+ Source code in Agent/modules/general_ml/handler.py +
+ (lines 72-89)
@staticmethod
+def infer(ml_model, general_model_name: str, text: str, params: dict):
+    """
+    Infer the model
+    Args:
+        ml_model: General model
+        general_model_name (str): Model name
+        text (str): Text
+        params (dict): Model params
+
+    Returns:
+
+    """
+    if general_model_name == "sentence_transformer":
+        result = ml_model.encode(text)
+        return result.tolist()
+    logger.info(params)
+    raise ValueError(f"Model {general_model_name} is not implemented")
+
+
+
+ +
+ +
+ + +

+ load_model(general_model_name) + + + staticmethod + + +

+ + +
+ +

Load model +Args: + general_model_name (str): Model name

+

Returns:

+ +
+ Source code in Agent/modules/general_ml/handler.py +
+ (lines 58-70)
@staticmethod
+def load_model(general_model_name: str):
+    """
+    Load model
+    Args:
+        general_model_name (str): Model name
+
+    Returns:
+
+    """
+    if general_model_name == "sentence_transformer":
+        return SentenceTransformer("all-MiniLM-L6-v2")
+    raise ValueError(f"Model {general_model_name} is not implemented")
\ No newline at end of file
diff --git a/Sources/Agent/modules/general_ml/ml_models/index.html b/Sources/Agent/modules/general_ml/ml_models/index.html
new file mode 100644
index 00000000..469c36c0
--- /dev/null
+++ b/Sources/Agent/modules/general_ml/ml_models/index.html
@@ -0,0 +1,4637 @@
+ MLModels - OpenOmni Framework by AI4WA

MLModels

\ No newline at end of file
diff --git a/Sources/Agent/modules/hf_llm/handler/index.html b/Sources/Agent/modules/hf_llm/handler/index.html
new file mode 100644
index 00000000..1449ea9f
--- /dev/null
+++ b/Sources/Agent/modules/hf_llm/handler/index.html
@@ -0,0 +1,4986 @@
+ Handler - OpenOmni Framework by AI4WA

Handler

+ +
+ + + + +
+ + + +
+ + + + + + + + +
+ + + +

+ HFLLM + + +

+ + +
+ + +
+ Source code in Agent/modules/hf_llm/handler.py +
+ (lines 17-73)
class HFLLM:
+    def __init__(self):
+        self.avail_models = {}
+        self.avail_tokenizers = {}
+
+    def handle_task(self, task: Task) -> Task:
+        """
+        Handle the task
+        Args:
+            task (Task): The task to handle
+
+        Returns:
+            Updated task
+        """
+        TimeLogger.log_task(task, "start_hf_llm")
+        result_profile = {}
+        latency_profile = {}
+        hf_parameters = HFParameters(**task.parameters)
+        hf_model_name = hf_parameters.hf_model_name
+        text = hf_parameters.text
+        hf_model = self.avail_models.get(hf_model_name, None)
+        if hf_model is None:
+            device = "cuda" if torch.cuda.is_available() else "cpu"
+            logger.error(f"Model {hf_model_name} not loaded yet")
+            with time_tracker(
+                "init_model", latency_profile, track_type=TrackType.TRANSFER.value
+            ):
+                hf_tokenizer = AutoTokenizer.from_pretrained(hf_model_name)
+                hf_model = AutoModelForCausalLM.from_pretrained(hf_model_name)
+                hf_model.to(device)
+                self.avail_models[hf_model_name] = hf_model
+                self.avail_tokenizers[hf_model_name] = hf_tokenizer
+
+        with timer(logger, f"Model infer {hf_model_name}"):
+            with time_tracker(
+                "infer", latency_profile, track_type=TrackType.MODEL.value
+            ):
+                inputs = self.avail_tokenizers[hf_model_name](
+                    text,
+                    return_tensors="pt",
+                    max_length=1024,
+                    truncation=True,
+                )
+                # to device
+                inputs = {k: v.to(hf_model.device) for k, v in inputs.items()}
+                num_of_tokens = len(inputs["input_ids"][0])
+                res = hf_model.generate(**inputs, max_new_tokens=num_of_tokens + 100)
+                generated_text = self.avail_tokenizers[hf_model_name].decode(
+                    res[0].cpu().tolist(), skip_special_tokens=True
+                )
+        result_profile["text"] = generated_text
+        result_profile["logs"] = res[0].tolist()
+        task.result_status = ResultStatus.completed.value
+        task.result_json.result_profile.update(result_profile)
+        task.result_json.latency_profile.update(latency_profile)
+        TimeLogger.log_task(task, "end_hf_llm")
+        return task
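
The generation branch above follows the standard Hugging Face tokenize/generate/decode pattern; a standalone sketch with a small illustrative checkpoint (distilgpt2 is only an example here, not a framework default).

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "distilgpt2"  # illustrative; any causal LM id works
device = "cuda" if torch.cuda.is_available() else "cpu"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name).to(device)

inputs = tokenizer("Hello, OpenOmni", return_tensors="pt", max_length=1024, truncation=True)
inputs = {k: v.to(device) for k, v in inputs.items()}
prompt_len = inputs["input_ids"].shape[1]

# Mirrors the handler: budget roughly the prompt length plus 100 new tokens.
output_ids = model.generate(**inputs, max_new_tokens=prompt_len + 100)
print(tokenizer.decode(output_ids[0].cpu().tolist(), skip_special_tokens=True))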
+
+
+ + + +
+ + + + + + + + + +
+ + +

+ handle_task(task) + +

+ + +
+ +

Handle the task +Args: + task (Task): The task to handle

+ + +

Returns:

Task: Updated task
+ Source code in Agent/modules/hf_llm/handler.py +
+ (lines 22-73)
def handle_task(self, task: Task) -> Task:
+    """
+    Handle the task
+    Args:
+        task (Task): The task to handle
+
+    Returns:
+        Updated task
+    """
+    TimeLogger.log_task(task, "start_hf_llm")
+    result_profile = {}
+    latency_profile = {}
+    hf_parameters = HFParameters(**task.parameters)
+    hf_model_name = hf_parameters.hf_model_name
+    text = hf_parameters.text
+    hf_model = self.avail_models.get(hf_model_name, None)
+    if hf_model is None:
+        device = "cuda" if torch.cuda.is_available() else "cpu"
+        logger.error(f"Model {hf_model_name} not loaded yet")
+        with time_tracker(
+            "init_model", latency_profile, track_type=TrackType.TRANSFER.value
+        ):
+            hf_tokenizer = AutoTokenizer.from_pretrained(hf_model_name)
+            hf_model = AutoModelForCausalLM.from_pretrained(hf_model_name)
+            hf_model.to(device)
+            self.avail_models[hf_model_name] = hf_model
+            self.avail_tokenizers[hf_model_name] = hf_tokenizer
+
+    with timer(logger, f"Model infer {hf_model_name}"):
+        with time_tracker(
+            "infer", latency_profile, track_type=TrackType.MODEL.value
+        ):
+            inputs = self.avail_tokenizers[hf_model_name](
+                text,
+                return_tensors="pt",
+                max_length=1024,
+                truncation=True,
+            )
+            # to device
+            inputs = {k: v.to(hf_model.device) for k, v in inputs.items()}
+            num_of_tokens = len(inputs["input_ids"][0])
+            res = hf_model.generate(**inputs, max_new_tokens=num_of_tokens + 100)
+            generated_text = self.avail_tokenizers[hf_model_name].decode(
+                res[0].cpu().tolist(), skip_special_tokens=True
+            )
+    result_profile["text"] = generated_text
+    result_profile["logs"] = res[0].tolist()
+    task.result_status = ResultStatus.completed.value
+    task.result_json.result_profile.update(result_profile)
+    task.result_json.latency_profile.update(latency_profile)
+    TimeLogger.log_task(task, "end_hf_llm")
+    return task
\ No newline at end of file
diff --git a/Sources/Agent/modules/openai/handler/index.html b/Sources/Agent/modules/openai/handler/index.html
new file mode 100644
index 00000000..6a4b12ea
--- /dev/null
+++ b/Sources/Agent/modules/openai/handler/index.html
@@ -0,0 +1,6236 @@
+ Handler - OpenOmni Framework by AI4WA

Handler

+ +
+ + + + +
+ + + +
+ + + + + + + + +
+ + + +

+ OpenAIHandler + + +

+ + +
+ + +
+ Source code in Agent/modules/openai/handler.py +
+ (lines 24-320)
class OpenAIHandler:
+    def __init__(self):
+        self.client = OpenAI()
+
+    def handle_task(self, task: Task) -> Task:
+        """
+        Handle the task
+        Args:
+            task (Task): The task
+
+        Returns:
+            The task with the result
+        """
+        result_profile = {}
+        latency_profile = {}
+        TimeLogger.log_task(task, "start_openai")
+        if "speech2text" in task.task_name:
+            TimeLogger.log(latency_profile, "start_openai_speech2text")
+            text = self.speech2text(task)
+            TimeLogger.log(latency_profile, "end_openai_speech2text")
+            result_profile["text"] = text
+        if "openai_gpt_4o_text_and_image" in task.task_name:
+            TimeLogger.log(latency_profile, "start_openai_gpt_4o")
+            text = self.gpt_4o_text_and_images(task)
+            TimeLogger.log(latency_profile, "end_openai_gpt_4o")
+            result_profile["text"] = text
+        if "openai_gpt_4o_text_only" in task.task_name:
+            TimeLogger.log(latency_profile, "start_openai_gpt_4o")
+            text = self.gpt_4o_text_only(task)
+            TimeLogger.log(latency_profile, "end_openai_gpt_4o")
+            result_profile["text"] = text
+        if "openai_gpt_35" in task.task_name:
+            TimeLogger.log(latency_profile, "start_openai_gpt_35")
+            text = self.gpt_35(task)
+            TimeLogger.log(latency_profile, "end_openai_gpt_35")
+            result_profile["text"] = text
+        if "text2speech" in task.task_name:
+            TimeLogger.log(latency_profile, "start_openai_text2speech")
+            audio_file_path = self.text2speech(task)
+            TimeLogger.log(latency_profile, "end_openai_text2speech")
+            result_profile["audio_file_path"] = audio_file_path.split("/")[-1]
+        task.result_status = ResultStatus.completed.value
+        task.result_json.result_profile.update(result_profile)
+        task.result_json.latency_profile.update(latency_profile)
+        TimeLogger.log_task(task, "end_openai")
+        return task
+
+    def speech2text(self, task: Task) -> Optional[str]:
+        """
+        Call OpenAI endpoints to convert speech to text
+        Args:
+            task (Task): The path to the audio file
+
+        Returns:
+            str: The transcribed text
+        """
+
+        try:
+            logger.info(task.parameters)
+            params = Speech2TextParameters(**task.parameters)
+            with time_tracker(
+                    "locate_audio_file",
+                    task.result_json.latency_profile,
+                    track_type=TrackType.TRANSFER.value,
+            ):
+                audio_file_path = Speech2Text.locate_audio_file(
+                    params.uid, params.audio_index, params.end_time
+                )
+
+            logger.info(f"Transcribing audio file: {audio_file_path}")
+
+            audio_file_path = Path(audio_file_path)
+            if not audio_file_path.exists():
+                logger.error(f"Audio file {audio_file_path} not found")
+                return None
+            with time_tracker(
+                    "openai_stt",
+                    task.result_json.latency_profile,
+                    track_type=TrackType.MODEL.value,
+            ):
+                with open(audio_file_path, "rb") as audio_file:
+                    res = self.client.audio.transcriptions.create(
+                        model="whisper-1", file=audio_file
+                    )
+
+            text = res.text
+            logger.info(f"Transcription result: {text}")
+            return text
+        except Exception as e:
+            logger.error(f"Error transcribing audio file: {e}")
+        return None
+
+    def gpt_4o_text_only(self, task: Task) -> str:
+        """
+        Get the text only
+        Args:
+            task:
+
+        Returns:
+
+        """
+        params = OpenAIGPT4OTextOnlyParameters(**task.parameters)
+        text = params.text
+        prompt_template = params.prompt_template
+        logger.info(f"Text: {text}")
+        prompt = prompt_template.format(text=text)
+        messages = [
+            {
+                "role": "user",
+                "content": [
+                    {"type": "text", "text": prompt},
+                ],
+            }
+        ]
+
+        with time_tracker(
+                "gpt-4o-call",
+                task.result_json.latency_profile,
+                track_type=TrackType.MODEL.value,
+        ):
+            res = self.client.chat.completions.create(
+                model="gpt-4o",
+                messages=messages,
+            )
+        return res.choices[0].message.content
+
+    def gpt_35(self, task: Task) -> Optional[str]:
+        """
+        Call OpenAI endpoints to convert speech to text
+        Args:
+            task (Task): The path to the audio file
+
+        Returns:
+            str: The transcribed text
+        """
+
+        try:
+            logger.info(task.parameters)
+            params = OpenAIGPT4OTextOnlyParameters(**task.parameters)
+            text = params.text
+            prompt_template = params.prompt_template
+            logger.info(f"Text: {text}")
+            prompt = prompt_template.format(text=text)
+            messages = [
+                {
+                    "role": "user",
+                    "content": [
+                        {"type": "text", "text": prompt},
+                    ],
+                }
+            ]
+            with time_tracker(
+                    "openai_gpt_35",
+                    task.result_json.latency_profile,
+                    track_type=TrackType.MODEL.value,
+            ):
+                res = self.client.chat.completions.create(
+                    model="gpt-3.5-turbo",
+                    messages=messages,
+                )
+
+            return res.choices[0].message.content
+        except Exception as e:
+            logger.error(f"Error locating audio file: {e}")
+            return None
+
+    def gpt_4o_text_and_images(self, task: Task) -> Optional[str]:
+        """
+        Get the text and images
+        And then call the GPT-4o endpoints
+
+        # we need to sample the images as it will be a lot of them
+
+        Args:
+            task (Task): The task
+
+        Returns:
+
+        """
+        params = OpenAIGPT4OParameters(**task.parameters)
+        text = params.text
+        images_path_list = params.images_path_list
+        sample_ratio = params.sample_ratio
+        prompt_template = params.prompt_template
+        logger.info(f"Text: {text}")
+
+        # sample the images
+        # so, we will only get the images for every sample_ratio images
+        logger.info(f"Current length of images: {len(images_path_list)}")
+        logger.debug(images_path_list)
+        images_path_list = images_path_list[::sample_ratio]
+        logger.info(f"Sampled length of images: {len(images_path_list)}")
+
+        # read image data to the one gpt-4o can take, something like data:image/jpeg;base64
+        with time_tracker(
+                label="encode_images",
+                profile=task.result_json.latency_profile,
+                track_type=TrackType.TRANSFER.value,
+        ):
+            images = []
+            for images_path in images_path_list:
+                folder = CLIENT_DATA_FOLDER / images_path
+                if not folder.exists():
+                    continue
+                for image_file in folder.iterdir():
+                    images.append(self.encode_image(image_file))
+        """
+        messages = [
+            {
+              "role": "user",
+              "content": [
+                {
+                  "type": "text",
+                  "text": "What’s in this image?"
+                },
+                {
+                  "type": "image_url",
+                  "image_url": {
+                    "url": f"data:image/jpeg;base64,{base64_image}"
+                  }
+                }
+              ]
+            }
+          ]
+        """
+
+        prompt = prompt_template.format(text=text)
+        messages = [
+            {
+                "role": "user",
+                "content": [
+                    {"type": "text", "text": prompt},
+                ],
+            }
+        ]
+        for image in images:
+            if not image:
+                continue
+            messages[0]["content"].append(
+                {
+                    "type": "image_url",
+                    "image_url": {"url": f"data:image/jpeg;base64,{image}"},
+                }
+            )
+
+        logger.debug(messages)
+        # call gpt-4o
+        with time_tracker(
+                "gpt-4o-call",
+                task.result_json.latency_profile,
+                track_type=TrackType.MODEL.value,
+        ):
+            res = self.client.chat.completions.create(
+                model="gpt-4o",
+                messages=messages,
+            )
+        return res.choices[0].message.content
+
+    @staticmethod
+    def encode_image(image_path):
+        with open(image_path, "rb") as image_file:
+            return base64.b64encode(image_file.read()).decode("utf-8")
+
+    def text2speech(self, task: Task) -> Optional[str]:
+        """
+        Call OpenAI endpoints to convert text to speech
+        Args:
+            task (Task): The text to convert
+
+        Returns:
+
+        """
+        params = Text2SpeechParameters(**task.parameters)
+        text = params.text
+        logger.info(f"Text: {text}")
+        output_audio_file_path = DATA_DIR / "tts" / f"{task.id}.mp3"
+        # if folder does not exist, create it
+        output_audio_file_path.parent.mkdir(parents=True, exist_ok=True)
+        output_audio_file_path = output_audio_file_path.as_posix()
+
+        with time_tracker(
+                "openai_tts",
+                task.result_json.latency_profile,
+                track_type=TrackType.MODEL.value,
+        ):
+            res = self.client.audio.speech.create(
+                model="tts-1",
+                voice="alloy",
+                input=text,
+            )
+        with time_tracker(
+                "save_audio",
+                task.result_json.latency_profile,
+                track_type=TrackType.TRANSFER.value,
+        ):
+            res.stream_to_file(output_audio_file_path)
+        return output_audio_file_path
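
Routing is driven by substrings of task.task_name (for example a name containing openai_gpt_35 goes to gpt_35, one containing text2speech to text2speech). Stripped of the task plumbing, the underlying OpenAI calls look like this sketch; it assumes OPENAI_API_KEY is set in the environment, and the prompt text and output file name are placeholders.

from openai import OpenAI

client = OpenAI()  # reads OPENAI_API_KEY from the environment

# Chat completion, as in gpt_35 / gpt_4o_text_only.
chat = client.chat.completions.create(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Say hello"}],
)
print(chat.choices[0].message.content)

# Text to speech, as in text2speech.
speech = client.audio.speech.create(model="tts-1", voice="alloy", input="Hello from OpenOmni")
speech.stream_to_file("hello.mp3")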
+
+
+ + + +
+ + + + + + + + + +
+ + +

+ gpt_35(task) + +

+ + +
+ +

Call OpenAI endpoints to convert speech to text +Args: + task (Task): The path to the audio file

+ + +

Returns:

str (Optional[str]): The transcribed text
+ Source code in Agent/modules/openai/handler.py +
+ (lines 150-188)
def gpt_35(self, task: Task) -> Optional[str]:
+    """
+    Call OpenAI endpoints to convert speech to text
+    Args:
+        task (Task): The path to the audio file
+
+    Returns:
+        str: The transcribed text
+    """
+
+    try:
+        logger.info(task.parameters)
+        params = OpenAIGPT4OTextOnlyParameters(**task.parameters)
+        text = params.text
+        prompt_template = params.prompt_template
+        logger.info(f"Text: {text}")
+        prompt = prompt_template.format(text=text)
+        messages = [
+            {
+                "role": "user",
+                "content": [
+                    {"type": "text", "text": prompt},
+                ],
+            }
+        ]
+        with time_tracker(
+                "openai_gpt_35",
+                task.result_json.latency_profile,
+                track_type=TrackType.MODEL.value,
+        ):
+            res = self.client.chat.completions.create(
+                model="gpt-3.5-turbo",
+                messages=messages,
+            )
+
+        return res.choices[0].message.content
+    except Exception as e:
+        logger.error(f"Error locating audio file: {e}")
+        return None
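
The parameters object only needs text plus a prompt_template containing a {text} placeholder; a tiny illustration of how the prompt is assembled (the template wording here is illustrative, not from the source).

prompt_template = "Answer the user's question briefly.\n\nQuestion: {text}"
text = "What is OpenOmni?"
prompt = prompt_template.format(text=text)  # mirrors the call above
print(prompt)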
+
+
+
+ +
+ +
+ + +

+ gpt_4o_text_and_images(task) + +

+ + +
+ +

Get the text and images +And then call the GPT-4o endpoints

+

we need to sample the images as it will be a lot of them

+ + +

Parameters:

task (Task, required): The task

Returns:

+ +
+ Source code in Agent/modules/openai/handler.py +
+ (lines 190-280)
def gpt_4o_text_and_images(self, task: Task) -> Optional[str]:
+    """
+    Get the text and images
+    And then call the GPT-4o endpoints
+
+    # we need to sample the images as it will be a lot of them
+
+    Args:
+        task (Task): The task
+
+    Returns:
+
+    """
+    params = OpenAIGPT4OParameters(**task.parameters)
+    text = params.text
+    images_path_list = params.images_path_list
+    sample_ratio = params.sample_ratio
+    prompt_template = params.prompt_template
+    logger.info(f"Text: {text}")
+
+    # sample the images
+    # so, we will only get the images for every sample_ratio images
+    logger.info(f"Current length of images: {len(images_path_list)}")
+    logger.debug(images_path_list)
+    images_path_list = images_path_list[::sample_ratio]
+    logger.info(f"Sampled length of images: {len(images_path_list)}")
+
+    # read image data to the one gpt-4o can take, something like data:image/jpeg;base64
+    with time_tracker(
+            label="encode_images",
+            profile=task.result_json.latency_profile,
+            track_type=TrackType.TRANSFER.value,
+    ):
+        images = []
+        for images_path in images_path_list:
+            folder = CLIENT_DATA_FOLDER / images_path
+            if not folder.exists():
+                continue
+            for image_file in folder.iterdir():
+                images.append(self.encode_image(image_file))
+    """
+    messages = [
+        {
+          "role": "user",
+          "content": [
+            {
+              "type": "text",
+              "text": "What’s in this image?"
+            },
+            {
+              "type": "image_url",
+              "image_url": {
+                "url": f"data:image/jpeg;base64,{base64_image}"
+              }
+            }
+          ]
+        }
+      ]
+    """
+
+    prompt = prompt_template.format(text=text)
+    messages = [
+        {
+            "role": "user",
+            "content": [
+                {"type": "text", "text": prompt},
+            ],
+        }
+    ]
+    for image in images:
+        if not image:
+            continue
+        messages[0]["content"].append(
+            {
+                "type": "image_url",
+                "image_url": {"url": f"data:image/jpeg;base64,{image}"},
+            }
+        )
+
+    logger.debug(messages)
+    # call gpt-4o
+    with time_tracker(
+            "gpt-4o-call",
+            task.result_json.latency_profile,
+            track_type=TrackType.MODEL.value,
+    ):
+        res = self.client.chat.completions.create(
+            model="gpt-4o",
+            messages=messages,
+        )
+    return res.choices[0].message.content
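
The frame sampling is plain stride slicing; with sample_ratio=3 only every third folder of frames is encoded and sent (folder names below are illustrative).

images_path_list = [f"frames/segment_{i}" for i in range(10)]  # illustrative folder names
sample_ratio = 3
print(images_path_list[::sample_ratio])
# ['frames/segment_0', 'frames/segment_3', 'frames/segment_6', 'frames/segment_9']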
+
+
+
+ +
+ +
+ + +

+ gpt_4o_text_only(task) + +

+ + +
+ +

Get the text only +Args: + task:

+

Returns:

+ +
+ Source code in Agent/modules/openai/handler.py +
+ (lines 116-148)
def gpt_4o_text_only(self, task: Task) -> str:
+    """
+    Get the text only
+    Args:
+        task:
+
+    Returns:
+
+    """
+    params = OpenAIGPT4OTextOnlyParameters(**task.parameters)
+    text = params.text
+    prompt_template = params.prompt_template
+    logger.info(f"Text: {text}")
+    prompt = prompt_template.format(text=text)
+    messages = [
+        {
+            "role": "user",
+            "content": [
+                {"type": "text", "text": prompt},
+            ],
+        }
+    ]
+
+    with time_tracker(
+            "gpt-4o-call",
+            task.result_json.latency_profile,
+            track_type=TrackType.MODEL.value,
+    ):
+        res = self.client.chat.completions.create(
+            model="gpt-4o",
+            messages=messages,
+        )
+    return res.choices[0].message.content
+
+
+
+ +
+ +
+ + +

+ handle_task(task) + +

+ + +
+ +

Handle the task +Args: + task (Task): The task

+ + +

Returns:

Task: The task with the result
+ Source code in Agent/modules/openai/handler.py +
+ (lines 28-69)
def handle_task(self, task: Task) -> Task:
+    """
+    Handle the task
+    Args:
+        task (Task): The task
+
+    Returns:
+        The task with the result
+    """
+    result_profile = {}
+    latency_profile = {}
+    TimeLogger.log_task(task, "start_openai")
+    if "speech2text" in task.task_name:
+        TimeLogger.log(latency_profile, "start_openai_speech2text")
+        text = self.speech2text(task)
+        TimeLogger.log(latency_profile, "end_openai_speech2text")
+        result_profile["text"] = text
+    if "openai_gpt_4o_text_and_image" in task.task_name:
+        TimeLogger.log(latency_profile, "start_openai_gpt_4o")
+        text = self.gpt_4o_text_and_images(task)
+        TimeLogger.log(latency_profile, "end_openai_gpt_4o")
+        result_profile["text"] = text
+    if "openai_gpt_4o_text_only" in task.task_name:
+        TimeLogger.log(latency_profile, "start_openai_gpt_4o")
+        text = self.gpt_4o_text_only(task)
+        TimeLogger.log(latency_profile, "end_openai_gpt_4o")
+        result_profile["text"] = text
+    if "openai_gpt_35" in task.task_name:
+        TimeLogger.log(latency_profile, "start_openai_gpt_35")
+        text = self.gpt_35(task)
+        TimeLogger.log(latency_profile, "end_openai_gpt_35")
+        result_profile["text"] = text
+    if "text2speech" in task.task_name:
+        TimeLogger.log(latency_profile, "start_openai_text2speech")
+        audio_file_path = self.text2speech(task)
+        TimeLogger.log(latency_profile, "end_openai_text2speech")
+        result_profile["audio_file_path"] = audio_file_path.split("/")[-1]
+    task.result_status = ResultStatus.completed.value
+    task.result_json.result_profile.update(result_profile)
+    task.result_json.latency_profile.update(latency_profile)
+    TimeLogger.log_task(task, "end_openai")
+    return task
+
+
+
+ +
+ +
+ + +

+ speech2text(task) + +

+ + +
+ +

Call OpenAI endpoints to convert speech to text +Args: + task (Task): The path to the audio file

+ + +

Returns:

str (Optional[str]): The transcribed text
+ Source code in Agent/modules/openai/handler.py +
+ (lines 71-114)
def speech2text(self, task: Task) -> Optional[str]:
+    """
+    Call OpenAI endpoints to convert speech to text
+    Args:
+        task (Task): The path to the audio file
+
+    Returns:
+        str: The transcribed text
+    """
+
+    try:
+        logger.info(task.parameters)
+        params = Speech2TextParameters(**task.parameters)
+        with time_tracker(
+                "locate_audio_file",
+                task.result_json.latency_profile,
+                track_type=TrackType.TRANSFER.value,
+        ):
+            audio_file_path = Speech2Text.locate_audio_file(
+                params.uid, params.audio_index, params.end_time
+            )
+
+        logger.info(f"Transcribing audio file: {audio_file_path}")
+
+        audio_file_path = Path(audio_file_path)
+        if not audio_file_path.exists():
+            logger.error(f"Audio file {audio_file_path} not found")
+            return None
+        with time_tracker(
+                "openai_stt",
+                task.result_json.latency_profile,
+                track_type=TrackType.MODEL.value,
+        ):
+            with open(audio_file_path, "rb") as audio_file:
+                res = self.client.audio.transcriptions.create(
+                    model="whisper-1", file=audio_file
+                )
+
+        text = res.text
+        logger.info(f"Transcription result: {text}")
+        return text
+    except Exception as e:
+        logger.error(f"Error transcribing audio file: {e}")
+    return None
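
Without the task and file-location plumbing, the transcription itself is a single Whisper call; sample.wav is a placeholder path and OPENAI_API_KEY is assumed to be set.

from openai import OpenAI

client = OpenAI()
with open("sample.wav", "rb") as audio_file:  # placeholder audio file
    res = client.audio.transcriptions.create(model="whisper-1", file=audio_file)
print(res.text)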
+
+
+
+ +
+ +
+ + +

+ text2speech(task) + +

+ + +
+ +

Call OpenAI endpoints to convert text to speech +Args: + task (Task): The text to convert

+

Returns:

+ +
+ Source code in Agent/modules/openai/handler.py +
+ (lines 287-320)
def text2speech(self, task: Task) -> Optional[str]:
+    """
+    Call OpenAI endpoints to convert text to speech
+    Args:
+        task (Task): The text to convert
+
+    Returns:
+
+    """
+    params = Text2SpeechParameters(**task.parameters)
+    text = params.text
+    logger.info(f"Text: {text}")
+    output_audio_file_path = DATA_DIR / "tts" / f"{task.id}.mp3"
+    # if folder does not exist, create it
+    output_audio_file_path.parent.mkdir(parents=True, exist_ok=True)
+    output_audio_file_path = output_audio_file_path.as_posix()
+
+    with time_tracker(
+            "openai_tts",
+            task.result_json.latency_profile,
+            track_type=TrackType.MODEL.value,
+    ):
+        res = self.client.audio.speech.create(
+            model="tts-1",
+            voice="alloy",
+            input=text,
+        )
+    with time_tracker(
+            "save_audio",
+            task.result_json.latency_profile,
+            track_type=TrackType.TRANSFER.value,
+    ):
+        res.stream_to_file(output_audio_file_path)
+    return output_audio_file_path
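
The synthesized audio is written to DATA_DIR / "tts" / f"{task.id}.mp3", and handle_task records only the file name in the result profile; a quick check of that naming (the DATA_DIR value and task id here are stand-ins).

from pathlib import Path

DATA_DIR = Path("/tmp/openomni_data")  # stand-in for the real DATA_DIR constant
task_id = 42                           # illustrative task id
output_audio_file_path = (DATA_DIR / "tts" / f"{task_id}.mp3").as_posix()
print(output_audio_file_path)                 # /tmp/openomni_data/tts/42.mp3
print(output_audio_file_path.split("/")[-1])  # 42.mp3 -> stored as result_profile["audio_file_path"]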
\ No newline at end of file
diff --git a/Sources/Agent/modules/quantization_llm/adaptor_worker/index.html b/Sources/Agent/modules/quantization_llm/adaptor_worker/index.html
new file mode 100644
index 00000000..b67735ea
--- /dev/null
+++ b/Sources/Agent/modules/quantization_llm/adaptor_worker/index.html
@@ -0,0 +1,5231 @@
+ AdaptorWorker - OpenOmni Framework by AI4WA

AdaptorWorker

QuantizationLLMAdaptor

This is the adaptor for the Quantization LLM model

Source code in Agent/modules/quantization_llm/adaptor_worker.py, lines 12-105
class QuantizationLLMAdaptor:
+    """
+    This is the adaptor for the Quantization LLM model
+    """
+
+    def __init__(self, model_config: QuantizationLLMModelConfig):
+        self.model_config = model_config
+        self.model_path = model_config.model_path()
+        self.llm = self.model_config.llm
+
+    def create_completion(self, prompt: str) -> str:
+        """
+        Create completion for the given prompt
+        Args:
+            prompt (str): The prompt to generate completion for the model
+
+        Returns:
+            str: The completion generated by the model
+
+        """
+
+        output = self.llm(
+            f"Q: {prompt} A: ",
+            max_tokens=500,  # Generate up to 500 tokens, set to None to generate up to the end of the context window
+            stop=[
+                "Q:",
+                "\n",
+            ],  # Stop generating just before the model would generate a new question
+            echo=True,  # Echo the prompt back in the output
+        )
+        logger.info(f"Response: {output}")
+        return output
+
+    def create_chat_completion(
+        self,
+        prompt: str = None,
+        messages: List[Dict[str, str]] = None,
+        tools: List[ChatCompletionTool] = None,
+        tool_choice: ChatCompletionToolChoiceOption = None,
+        *args,
+        **kwargs,
+    ):
+        """
+        Create chat completion for the given prompt and messages
+        Args:
+            prompt (str): The prompt to generate completion for the model
+            messages (List[Dict[str, str]]): The messages to generate completion for the model
+            tools (List[ChatCompletionTool]): The tools to use for chat completion
+            tool_choice (ChatCompletionToolChoiceOption): The tool choice to use for chat completion
+            *args:
+            **kwargs:
+
+        Returns:
+
+        """
+        if messages is not None:
+            """
+            This replicates passing all the chat completion params provided via llama_cpp
+            """
+
+            logger.info(f"Creating chat completion for messages: {messages}")
+            return self.llm.create_chat_completion(
+                messages=messages, tools=tools, tool_choice=tool_choice
+            )
+
+        if prompt:
+            """
+            Simple version of it, without message "role" definition
+            """
+
+            res = self.llm.create_chat_completion(
+                messages=[
+                    {"role": "user", "content": prompt},
+                ]
+            )
+            return res
+
+        raise ValueError("Prompt or messages are required")
+
+    def create_embedding(self, text: str) -> List[float]:
+        """
+        Create embedding for the given text
+        Args:
+            text (str): The text to generate embedding for
+
+        Returns:
+            List[float]: The embedding generated by the model
+
+        """
+        if text is None:
+            raise ValueError("Text is required")
+
+        logger.info(f"Creating embedding for text: {text}")
+        return self.llm.create_embedding(text)
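
A hedged usage sketch of the adaptor follows. The import paths and the model entry (name, repo, filename, size) are assumptions for illustration; inside the framework the model config is normally obtained from the API by the quantization LLM handler.

from modules.quantization_llm.models import QuantizationLLMModelConfig  # assumed import path, run from Agent/
from modules.quantization_llm.adaptor_worker import QuantizationLLMAdaptor

config = QuantizationLLMModelConfig(
    model_name="llama2-7b-chat",  # example model entry
    model_family="llama2",
    repo="TheBloke/Llama-2-7B-Chat-GGUF",  # example GGUF repo
    filename="llama-2-7b-chat.Q4_K_M.gguf",
    file_size=4.1,
    available=True,
)
config.init_llm()  # downloads the GGUF file if missing, then loads it through llama_cpp
adaptor = QuantizationLLMAdaptor(config)

res = adaptor.create_chat_completion(prompt="What does quantization trade off?")
print(res["choices"][0]["message"]["content"])  # same response shape the handler's infer() reads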
create_chat_completion(prompt=None, messages=None, tools=None, tool_choice=None, *args, **kwargs)

Create chat completion for the given prompt and messages

Args:
    prompt (str): The prompt to generate completion for the model
    messages (List[Dict[str, str]]): The messages to generate completion for the model
    tools (List[ChatCompletionTool]): The tools to use for chat completion
    tool_choice (ChatCompletionToolChoiceOption): The tool choice to use for chat completion

Returns:

Source code in Agent/modules/quantization_llm/adaptor_worker.py, lines 45-89
def create_chat_completion(
+    self,
+    prompt: str = None,
+    messages: List[Dict[str, str]] = None,
+    tools: List[ChatCompletionTool] = None,
+    tool_choice: ChatCompletionToolChoiceOption = None,
+    *args,
+    **kwargs,
+):
+    """
+    Create chat completion for the given prompt and messages
+    Args:
+        prompt (str): The prompt to generate completion for the model
+        messages (List[Dict[str, str]]): The messages to generate completion for the model
+        tools (List[ChatCompletionTool]): The tools to use for chat completion
+        tool_choice (ChatCompletionToolChoiceOption): The tool choice to use for chat completion
+        *args:
+        **kwargs:
+
+    Returns:
+
+    """
+    if messages is not None:
+        """
+        This replicates passing all the chat completion params provided via llama_cpp
+        """
+
+        logger.info(f"Creating chat completion for messages: {messages}")
+        return self.llm.create_chat_completion(
+            messages=messages, tools=tools, tool_choice=tool_choice
+        )
+
+    if prompt:
+        """
+        Simple version of it, without message "role" definition
+        """
+
+        res = self.llm.create_chat_completion(
+            messages=[
+                {"role": "user", "content": prompt},
+            ]
+        )
+        return res
+
+    raise ValueError("Prompt or messages are required")
create_completion(prompt)

Create completion for the given prompt

Args:
    prompt (str): The prompt to generate completion for the model

Returns:
    str: The completion generated by the model

Source code in Agent/modules/quantization_llm/adaptor_worker.py, lines 22-43
def create_completion(self, prompt: str) -> str:
+    """
+    Create completion for the given prompt
+    Args:
+        prompt (str): The prompt to generate completion for the model
+
+    Returns:
+        str: The completion generated by the model
+
+    """
+
+    output = self.llm(
+        f"Q: {prompt} A: ",
+        max_tokens=500,  # Generate up to 500 tokens, set to None to generate up to the end of the context window
+        stop=[
+            "Q:",
+            "\n",
+        ],  # Stop generating just before the model would generate a new question
+        echo=True,  # Echo the prompt back in the output
+    )
+    logger.info(f"Response: {output}")
+    return output
create_embedding(text)

Create embedding for the given text

Args:
    text (str): The text to generate embedding for

Returns:
    List[float]: The embedding generated by the model

Source code in Agent/modules/quantization_llm/adaptor_worker.py, lines 91-105
def create_embedding(self, text: str) -> List[float]:
+    """
+    Create embedding for the given text
+    Args:
+        text (str): The text to generate embedding for
+
+    Returns:
+        List[float]: The embedding generated by the model
+
+    """
+    if text is None:
+        raise ValueError("Text is required")
+
+    logger.info(f"Creating embedding for text: {text}")
+    return self.llm.create_embedding(text)
diff --git a/Sources/Agent/modules/quantization_llm/handler/index.html b/Sources/Agent/modules/quantization_llm/handler/index.html
new file mode 100644
index 00000000..7aa94aba

Handler

QuantizationLLM

Source code in Agent/modules/quantization_llm/handler.py, lines 15-100
class QuantizationLLM:
+    def __init__(self, api: API):
+        """
+        This class is used to load and manage the quantization LLM model
+
+        Args:
+            api (API): The API object to query the API
+        """
+        # query the available models
+        # init for llm models
+        self.api_llm_available_models = api.get_available_models()
+        logger.info(f"Available LLM Models: {len(self.api_llm_available_models)}")
+        self.local_llm_available_models = {}
+        for model in self.api_llm_available_models:
+            self.local_llm_available_models[model["model_name"]] = (
+                QuantizationLLMModelConfig(**model)
+            )
+
+    def handle_task(self, task: Task):
+        """
+        Handle the task
+        Args:
+            task (Task): The task to handle
+
+        Returns:
+
+        """
+        TimeLogger.log_task(task, "start_quantization_llm")
+        result_profile = {}
+        latency_profile = {}
+        quantization_llm_parameters = QuantizationLLMParameters(**task.parameters)
+        text = quantization_llm_parameters.text
+        llm_model_name = quantization_llm_parameters.llm_model_name
+        # get llm_model
+        llm_model = self.local_llm_available_models.get(llm_model_name, None)
+        if llm_model is None:
+            logger.error(f"Model {llm_model_name} not found")
+            task.result_status = ResultStatus.failed.value
+            task.description = f"Model {llm_model_name} not found"
+            return task
+
+        if llm_model.llm is None:
+            logger.error(f"Model {llm_model_name} not loaded")
+            try:
+                with time_tracker(
+                    "init_llm", latency_profile, track_type=TrackType.MODEL.value
+                ):
+                    llm_model.init_llm()
+            except Exception as llm_err:
+                logger.exception(llm_err)
+                task.result_status = ResultStatus.failed.value
+                task.description = str(llm_err)
+                return task
+        with time_tracker("infer", latency_profile, track_type=TrackType.MODEL.value):
+            logger.info(f"Text: {text}")
+            res_text, logs = self.infer(
+                text=text,
+                llm_model_config=llm_model,
+            )
+        result_profile["logs"] = logs
+        result_profile["text"] = res_text
+        task.result_status = ResultStatus.completed.value
+        task.result_json.result_profile.update(result_profile)
+        task.result_json.latency_profile.update(latency_profile)
+        TimeLogger.log_task(task, "end_quantization_llm")
+        return task
+
+    @staticmethod
+    def infer(text: str, llm_model_config: QuantizationLLMModelConfig):
+        """
+        Infer the task
+        Args:
+            text (str): The text to infer
+            llm_model_config (QuantizationLLMModelConfig): The llm model config
+
+        Returns:
+
+        """
+        llm_adaptor = QuantizationLLMAdaptor(llm_model_config)
+        res = llm_adaptor.create_chat_completion(
+            prompt=text,
+        )
+        logger.info(res)
+        text = res["choices"][0]["message"]["content"]
+
+        return text, res
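
The shape of task.parameters expected by handle_task follows from the QuantizationLLMParameters fields read above (text and llm_model_name). A minimal sketch of the payload, with illustrative values:

# Illustrative payload only; the model name must match one of the entries
# returned by api.get_available_models().
task_parameters = {
    "text": "Summarise the last user utterance in one sentence.",
    "llm_model_name": "llama2-7b-chat",
}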
__init__(api)

This class is used to load and manage the quantization LLM model

Parameters:
    api (API): The API object to query the API (required)

Source code in Agent/modules/quantization_llm/handler.py, lines 16-31
def __init__(self, api: API):
+    """
+    This class is used to load and manage the quantization LLM model
+
+    Args:
+        api (API): The API object to query the API
+    """
+    # query the available models
+    # init for llm models
+    self.api_llm_available_models = api.get_available_models()
+    logger.info(f"Available LLM Models: {len(self.api_llm_available_models)}")
+    self.local_llm_available_models = {}
+    for model in self.api_llm_available_models:
+        self.local_llm_available_models[model["model_name"]] = (
+            QuantizationLLMModelConfig(**model)
+        )
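
A sketch of wiring the handler to the Agent's API client; the import paths, domain, and token below are placeholders, not values from the source.

from utils.api import API  # assumed import path within Agent/
from modules.quantization_llm.handler import QuantizationLLM  # assumed import path

api = API(domain="http://localhost:8000", token="<your-api-token>")  # placeholder credentials
handler = QuantizationLLM(api=api)  # queries the available models on construction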
handle_task(task)

Handle the task

Args:
    task (Task): The task to handle

Returns:

Source code in Agent/modules/quantization_llm/handler.py, lines 33-80
def handle_task(self, task: Task):
+    """
+    Handle the task
+    Args:
+        task (Task): The task to handle
+
+    Returns:
+
+    """
+    TimeLogger.log_task(task, "start_quantization_llm")
+    result_profile = {}
+    latency_profile = {}
+    quantization_llm_parameters = QuantizationLLMParameters(**task.parameters)
+    text = quantization_llm_parameters.text
+    llm_model_name = quantization_llm_parameters.llm_model_name
+    # get llm_model
+    llm_model = self.local_llm_available_models.get(llm_model_name, None)
+    if llm_model is None:
+        logger.error(f"Model {llm_model_name} not found")
+        task.result_status = ResultStatus.failed.value
+        task.description = f"Model {llm_model_name} not found"
+        return task
+
+    if llm_model.llm is None:
+        logger.error(f"Model {llm_model_name} not loaded")
+        try:
+            with time_tracker(
+                "init_llm", latency_profile, track_type=TrackType.MODEL.value
+            ):
+                llm_model.init_llm()
+        except Exception as llm_err:
+            logger.exception(llm_err)
+            task.result_status = ResultStatus.failed.value
+            task.description = str(llm_err)
+            return task
+    with time_tracker("infer", latency_profile, track_type=TrackType.MODEL.value):
+        logger.info(f"Text: {text}")
+        res_text, logs = self.infer(
+            text=text,
+            llm_model_config=llm_model,
+        )
+    result_profile["logs"] = logs
+    result_profile["text"] = res_text
+    task.result_status = ResultStatus.completed.value
+    task.result_json.result_profile.update(result_profile)
+    task.result_json.latency_profile.update(latency_profile)
+    TimeLogger.log_task(task, "end_quantization_llm")
+    return task
infer(text, llm_model_config) (staticmethod)

Infer the task

Args:
    text (str): The text to infer
    llm_model_config (QuantizationLLMModelConfig): The llm model config

Returns:

Source code in Agent/modules/quantization_llm/handler.py, lines 82-100
@staticmethod
+def infer(text: str, llm_model_config: QuantizationLLMModelConfig):
+    """
+    Infer the task
+    Args:
+        text (str): The text to infer
+        llm_model_config (QuantizationLLMModelConfig): The llm model config
+
+    Returns:
+
+    """
+    llm_adaptor = QuantizationLLMAdaptor(llm_model_config)
+    res = llm_adaptor.create_chat_completion(
+        prompt=text,
+    )
+    logger.info(res)
+    text = res["choices"][0]["message"]["content"]
+
+    return text, res
diff --git a/Sources/Agent/modules/quantization_llm/models/index.html b/Sources/Agent/modules/quantization_llm/models/index.html
new file mode 100644
index 00000000..cd2d3147

Models

QuantizationLLMModelConfig

Source code in Agent/modules/quantization_llm/models.py, lines 12-96
class QuantizationLLMModelConfig:
+    def __init__(
+        self,
+        model_name: str,
+        model_family: str,
+        repo: str,
+        filename: str,
+        file_size: float,
+        available: bool,
+        *args,
+        **kwargs,
+    ):
+        """
+        Initialize the LLM Model Config
+        Args:
+            model_name (str): The name of the model
+            model_size (str): The size of the model
+            model_family (str): The family of the model
+            model_type (str): The type of the model
+            repo (str): The repo of the model
+            filename (str): The filename of the model
+            file_size (float): The size of the model file
+            available (bool): If the model is
+            *args:
+            **kwargs:
+        """
+        self.model_name = model_name
+        self.model_family = model_family
+        self.repo = repo
+        self.filename = filename
+        self.file_size = file_size
+        self.available = available
+        self.llm = None
+        logger.debug(args)
+        logger.debug(kwargs)
+
+    def model_path(self):
+        """
+        Check or load the model from the local directory
+        Returns:
+
+        """
+        model_file = LLM_MODEL_DIR / self.model_family / self.filename
+        if model_file.exists():
+            return model_file
+        if self.download_model():
+            return model_file
+        return None
+
+    def download_model(self):
+        """
+        If the model is not available, download it from the HuggingFace Hub
+        Returns:
+        """
+
+        download_url = hf_hub_url(repo_id=self.repo, filename=self.filename)
+        logger.critical(f"Downloading model from {download_url}")
+        model_general_folder = LLM_MODEL_DIR / self.model_family
+        logger.critical(f"Model folder {model_general_folder}")
+        model_general_folder.mkdir(parents=True, exist_ok=True)
+        filename = model_general_folder / self.filename
+        response = requests.get(download_url, stream=True)
+        # Total size in bytes.
+        total_size = int(response.headers.get("content-length", 0))
+        block_size = 1024  # 1 Kilobyte
+        logger.critical(f"Downloading {self.filename} to {model_general_folder}")
+        logger.critical(f"Total size: {total_size}")
+        progress_bar = tqdm(total=total_size, unit="iB", unit_scale=True)
+        with open(filename, "wb") as file:
+            for data in response.iter_content(block_size):
+                progress_bar.update(len(data))
+                file.write(data)
+        progress_bar.close()
+        if total_size != 0 and progress_bar.n != total_size:
+            logger.error("ERROR, something went wrong")
+            return False
+        return True
+
+    def init_llm(self):
+        self.llm = Llama(
+            model_path=self.model_path().as_posix(),
+            n_gpu_layers=-1,
+            embedding=True,
+            n_ctx=4096,
+        )
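
The download path above combines hf_hub_url from huggingface_hub with a streamed requests download and a tqdm progress bar. A standalone sketch of that pattern, with an illustrative repo, filename, and output path:

import requests
from huggingface_hub import hf_hub_url
from tqdm import tqdm

url = hf_hub_url(
    repo_id="TheBloke/Llama-2-7B-Chat-GGUF",  # illustrative repo
    filename="llama-2-7b-chat.Q4_K_M.gguf",   # illustrative file
)
resp = requests.get(url, stream=True)
total_size = int(resp.headers.get("content-length", 0))

with open("model.gguf", "wb") as f, tqdm(total=total_size, unit="iB", unit_scale=True) as bar:
    for chunk in resp.iter_content(1024):  # 1 KB blocks, as in download_model
        bar.update(len(chunk))
        f.write(chunk)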
__init__(model_name, model_family, repo, filename, file_size, available, *args, **kwargs)

Initialize the LLM Model Config

Args:
    model_name (str): The name of the model
    model_size (str): The size of the model
    model_family (str): The family of the model
    model_type (str): The type of the model
    repo (str): The repo of the model
    filename (str): The filename of the model
    file_size (float): The size of the model file
    available (bool): If the model is available

Source code in Agent/modules/quantization_llm/models.py, lines 13-46
def __init__(
+    self,
+    model_name: str,
+    model_family: str,
+    repo: str,
+    filename: str,
+    file_size: float,
+    available: bool,
+    *args,
+    **kwargs,
+):
+    """
+    Initialize the LLM Model Config
+    Args:
+        model_name (str): The name of the model
+        model_size (str): The size of the model
+        model_family (str): The family of the model
+        model_type (str): The type of the model
+        repo (str): The repo of the model
+        filename (str): The filename of the model
+        file_size (float): The size of the model file
+        available (bool): If the model is
+        *args:
+        **kwargs:
+    """
+    self.model_name = model_name
+    self.model_family = model_family
+    self.repo = repo
+    self.filename = filename
+    self.file_size = file_size
+    self.available = available
+    self.llm = None
+    logger.debug(args)
+    logger.debug(kwargs)
download_model()

If the model is not available, download it from the HuggingFace Hub

Returns:

Source code in Agent/modules/quantization_llm/models.py, lines 61-88
def download_model(self):
+    """
+    If the model is not available, download it from the HuggingFace Hub
+    Returns:
+    """
+
+    download_url = hf_hub_url(repo_id=self.repo, filename=self.filename)
+    logger.critical(f"Downloading model from {download_url}")
+    model_general_folder = LLM_MODEL_DIR / self.model_family
+    logger.critical(f"Model folder {model_general_folder}")
+    model_general_folder.mkdir(parents=True, exist_ok=True)
+    filename = model_general_folder / self.filename
+    response = requests.get(download_url, stream=True)
+    # Total size in bytes.
+    total_size = int(response.headers.get("content-length", 0))
+    block_size = 1024  # 1 Kilobyte
+    logger.critical(f"Downloading {self.filename} to {model_general_folder}")
+    logger.critical(f"Total size: {total_size}")
+    progress_bar = tqdm(total=total_size, unit="iB", unit_scale=True)
+    with open(filename, "wb") as file:
+        for data in response.iter_content(block_size):
+            progress_bar.update(len(data))
+            file.write(data)
+    progress_bar.close()
+    if total_size != 0 and progress_bar.n != total_size:
+        logger.error("ERROR, something went wrong")
+        return False
+    return True
model_path()

Check or load the model from the local directory

Returns:

Source code in Agent/modules/quantization_llm/models.py, lines 48-59
def model_path(self):
+    """
+    Check or load the model from the local directory
+    Returns:
+
+    """
+    model_file = LLM_MODEL_DIR / self.model_family / self.filename
+    if model_file.exists():
+        return model_file
+    if self.download_model():
+        return model_file
+    return None
diff --git a/Sources/Agent/modules/rag/handler/index.html b/Sources/Agent/modules/rag/handler/index.html
new file mode 100644
index 00000000..f8cf0a77

Handler

RAGHandler

Source code in Agent/modules/rag/handler.py, lines 13-35
class RAGHandler:
+    def __init__(self):
+        pass
+
+    def handle_task(self, task: Task) -> Task:
+        """
+        Handle the task
+        Args:
+            task:
+
+        Returns:
+
+        """
+        result_profile = {}
+        latency_profile = {}
+        TimeLogger.log_task(task, "start_rag")
+        # NOTE: this is a placeholder for the actual implementation
+        result_profile["text"] = "This is a placeholder for the actual implementation"
+        task.result_status = ResultStatus.completed.value
+        task.result_json.result_profile.update(result_profile)
+        task.result_json.latency_profile.update(latency_profile)
+        TimeLogger.log_task(task, "end_rag")
+        return task
handle_task(task)

Handle the task

Args:
    task:

Returns:

Source code in Agent/modules/rag/handler.py, lines 17-35
def handle_task(self, task: Task) -> Task:
+    """
+    Handle the task
+    Args:
+        task:
+
+    Returns:
+
+    """
+    result_profile = {}
+    latency_profile = {}
+    TimeLogger.log_task(task, "start_rag")
+    # NOTE: this is a placeholder for the actual implementation
+    result_profile["text"] = "This is a placeholder for the actual implementation"
+    task.result_status = ResultStatus.completed.value
+    task.result_json.result_profile.update(result_profile)
+    task.result_json.latency_profile.update(latency_profile)
+    TimeLogger.log_task(task, "end_rag")
+    return task
diff --git a/Sources/Agent/modules/rag/neo4j_connector/index.html b/Sources/Agent/modules/rag/neo4j_connector/index.html
new file mode 100644
index 00000000..f960b2c5

Neo4jConnector

diff --git a/Sources/Agent/modules/rag/postgresql_connector/index.html b/Sources/Agent/modules/rag/postgresql_connector/index.html
new file mode 100644
index 00000000..bba96cb9

PostgreSQLConnector

diff --git a/Sources/Agent/modules/speech_to_text/speech2text/index.html b/Sources/Agent/modules/speech_to_text/speech2text/index.html
new file mode 100644
index 00000000..d33887ba

Speech2Text

Speech2Text

Source code in Agent/modules/speech_to_text/speech2text.py, lines 18-130
class Speech2Text:
+    SUPPORTED_MODELS = ["whisper"]
+
+    def __init__(
+        self,
+        model_name: str = "whisper",
+        model_size: str = "small",
+        multi_language: bool = True,
+    ):
+        """
+        Initialize the translator
+        Args:
+            model_name (str): The name of the model to use
+            model_size (str): The size of the model to use
+            multi_language (bool): If the model is multi-language
+        """
+        self.model_name = model_name
+        if self.model_name == "whisper":
+            if not multi_language and "large" not in model_size:
+                model_size = f"{model_size}.en"
+            self.audio_model = whisper.load_model(model_size)
+        else:
+            raise ValueError(f"Model {model_name} not supported")
+
+    @staticmethod
+    def locate_audio_file(uid: str, sequence_index: str, end_time: str):
+        """
+        Locate the audio file
+        Args:
+            uid (str): The uid
+            sequence_index (str): The sequence index
+            end_time (str): The end time
+
+        Returns:
+            The audio file (str): The audio file
+        """
+        audio_folder = CLIENT_DATA_FOLDER / "audio" / uid
+        # audio file will be within this folder, named like <sequence_index>-<end time timestamp>.wav
+        end_time_obj = datetime.strptime(end_time, "%Y-%m-%dT%H:%M:%S.%f")
+        audio_file = (
+            audio_folder
+            / f"{sequence_index}-{end_time_obj.strftime('%Y%m%d%H%M%S')}.wav"
+        )
+        if not audio_file.exists():
+            logger.error(f"Audio file {audio_file} not found")
+            raise FileNotFoundError(f"Audio file {audio_file} not found")
+        return audio_file
+
+    def translate(self, message: Speech2TextParameters, task: Task) -> Task:
+        """
+        This is the key function to translate the audio to text
+        Args:
+            message (dict): The message to translate
+            task (Task): The task
+
+        Returns:
+            task (Task): The task
+
+        """
+
+        logger.info(f"Translating message {message}")
+        # read the data from the audio file in .wav file, then do the translation
+        audio_file = self.locate_audio_file(
+            message.uid, message.audio_index, message.end_time
+        )
+        logger.info(f"Audio file {audio_file}")
+        if audio_file is None:
+            return task
+
+        with timer(logger, "Loading audio"):
+            with time_tracker(
+                "load_audio",
+                task.result_json.latency_profile,
+                track_type=TrackType.MODEL.value,
+            ):
+                audio_np = whisper.load_audio(audio_file.as_posix())
+
+        with timer(logger, "Transcribing"):
+            with time_tracker(
+                "transcribe",
+                task.result_json.latency_profile,
+                track_type=TrackType.MODEL.value,
+            ):
+                result = self.audio_model.transcribe(
+                    audio_np, fp16=torch.cuda.is_available()
+                )
+        logger.critical(result)
+        task.result_json.result_profile.update(result)
+        return task
+
+    def handle_task(self, task: Task) -> Task:
+        """
+        Args:
+            task: The task to process
+
+        Returns:
+            The processed task
+        """
+        try:
+            task_parameters = Speech2TextParameters(**task.parameters)
+            TimeLogger.log_task(task, "start_translate")
+            task = self.translate(task_parameters, task)
+            TimeLogger.log_task(task, "end_translate")
+            task.result_status = ResultStatus.completed.value
+        except FileNotFoundError:
+            # then we need to try later as the sync is not done yet
+            logger.error("Audio file not found, will try later")
+            task.result_status = ResultStatus.pending.value
+        except Exception as e:
+            logger.error(e)
+            task.result_status = ResultStatus.failed.value
+            task.description = str(e)
+        return task
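
Outside the Task pipeline, the same Whisper calls can be run directly. A minimal sketch, assuming openai-whisper and torch are installed; the model size and audio path are illustrative.

import torch
import whisper

model = whisper.load_model("small.en")  # English-only variant, as selected in __init__ when multi_language is False
audio = whisper.load_audio("data/audio/example.wav")  # illustrative path
result = model.transcribe(audio, fp16=torch.cuda.is_available())
print(result["text"])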
__init__(model_name='whisper', model_size='small', multi_language=True)

Initialize the translator

Args:
    model_name (str): The name of the model to use
    model_size (str): The size of the model to use
    multi_language (bool): If the model is multi-language

Source code in Agent/modules/speech_to_text/speech2text.py, lines 21-40
def __init__(
+    self,
+    model_name: str = "whisper",
+    model_size: str = "small",
+    multi_language: bool = True,
+):
+    """
+    Initialize the translator
+    Args:
+        model_name (str): The name of the model to use
+        model_size (str): The size of the model to use
+        multi_language (bool): If the model is multi-language
+    """
+    self.model_name = model_name
+    if self.model_name == "whisper":
+        if not multi_language and "large" not in model_size:
+            model_size = f"{model_size}.en"
+        self.audio_model = whisper.load_model(model_size)
+    else:
+        raise ValueError(f"Model {model_name} not supported")
handle_task(task)

Parameters:
    task (Task): The task to process (required)

Returns:
    Task: The processed task

Source code in Agent/modules/speech_to_text/speech2text.py, lines 108-130
def handle_task(self, task: Task) -> Task:
+    """
+    Args:
+        task: The task to process
+
+    Returns:
+        The processed task
+    """
+    try:
+        task_parameters = Speech2TextParameters(**task.parameters)
+        TimeLogger.log_task(task, "start_translate")
+        task = self.translate(task_parameters, task)
+        TimeLogger.log_task(task, "end_translate")
+        task.result_status = ResultStatus.completed.value
+    except FileNotFoundError:
+        # then we need to try later as the sync is not done yet
+        logger.error("Audio file not found, will try later")
+        task.result_status = ResultStatus.pending.value
+    except Exception as e:
+        logger.error(e)
+        task.result_status = ResultStatus.failed.value
+        task.description = str(e)
+    return task
locate_audio_file(uid, sequence_index, end_time) (staticmethod)

Locate the audio file

Args:
    uid (str): The uid
    sequence_index (str): The sequence index
    end_time (str): The end time

Returns:
    The audio file (str): The audio file

Source code in Agent/modules/speech_to_text/speech2text.py, lines 42-64
@staticmethod
+def locate_audio_file(uid: str, sequence_index: str, end_time: str):
+    """
+    Locate the audio file
+    Args:
+        uid (str): The uid
+        sequence_index (str): The sequence index
+        end_time (str): The end time
+
+    Returns:
+        The audio file (str): The audio file
+    """
+    audio_folder = CLIENT_DATA_FOLDER / "audio" / uid
+    # audio file will be within this folder, named like <sequence_index>-<end time timestamp>.wav
+    end_time_obj = datetime.strptime(end_time, "%Y-%m-%dT%H:%M:%S.%f")
+    audio_file = (
+        audio_folder
+        / f"{sequence_index}-{end_time_obj.strftime('%Y%m%d%H%M%S')}.wav"
+    )
+    if not audio_file.exists():
+        logger.error(f"Audio file {audio_file} not found")
+        raise FileNotFoundError(f"Audio file {audio_file} not found")
+    return audio_file
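
The file layout locate_audio_file expects can be reproduced from its logic: audio sits under the client data folder in audio/<uid>/ and is named <sequence_index>-<end time as %Y%m%d%H%M%S>.wav. A small sketch computing that name for an illustrative uid and timestamp:

from datetime import datetime
from pathlib import Path

uid, sequence_index = "session-001", "3"  # illustrative values
end_time = "2024-08-05T15:01:30.123456"   # same format the method parses

end_time_obj = datetime.strptime(end_time, "%Y-%m-%dT%H:%M:%S.%f")
audio_file = Path("audio") / uid / f"{sequence_index}-{end_time_obj.strftime('%Y%m%d%H%M%S')}.wav"
print(audio_file)  # audio/session-001/3-20240805150130.wav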
translate(message, task)

This is the key function to translate the audio to text

Args:
    message (dict): The message to translate
    task (Task): The task

Returns:
    task (Task): The task

Source code in Agent/modules/speech_to_text/speech2text.py, lines 66-106
def translate(self, message: Speech2TextParameters, task: Task) -> Task:
+    """
+    This is the key function to translate the audio to text
+    Args:
+        message (dict): The message to translate
+        task (Task): The task
+
+    Returns:
+        task (Task): The task
+
+    """
+
+    logger.info(f"Translating message {message}")
+    # read the data from the audio file in .wav file, then do the translation
+    audio_file = self.locate_audio_file(
+        message.uid, message.audio_index, message.end_time
+    )
+    logger.info(f"Audio file {audio_file}")
+    if audio_file is None:
+        return task
+
+    with timer(logger, "Loading audio"):
+        with time_tracker(
+            "load_audio",
+            task.result_json.latency_profile,
+            track_type=TrackType.MODEL.value,
+        ):
+            audio_np = whisper.load_audio(audio_file.as_posix())
+
+    with timer(logger, "Transcribing"):
+        with time_tracker(
+            "transcribe",
+            task.result_json.latency_profile,
+            track_type=TrackType.MODEL.value,
+        ):
+            result = self.audio_model.transcribe(
+                audio_np, fp16=torch.cuda.is_available()
+            )
+    logger.critical(result)
+    task.result_json.result_profile.update(result)
+    return task
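
timer and time_tracker above are project utilities (see the Agent utils modules); their implementations are not shown on this page. As a point of reference only, a generic context-manager timer that records elapsed seconds into a dict looks roughly like this; it is a sketch, not the project's implementation:

import time
from contextlib import contextmanager

@contextmanager
def simple_tracker(name: str, profile: dict):
    start = time.time()
    try:
        yield
    finally:
        profile[name] = time.time() - start  # elapsed seconds keyed by step name

latency_profile = {}
with simple_tracker("transcribe", latency_profile):
    time.sleep(0.1)  # stand-in for the real work
print(latency_profile)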
diff --git a/Sources/Agent/modules/text_to_speech/text2speech/index.html b/Sources/Agent/modules/text_to_speech/text2speech/index.html
new file mode 100644
index 00000000..072c7c60

Text2Speech

Text2Speech

Source code in Agent/modules/text_to_speech/text2speech.py, lines 15-103
class Text2Speech:
+
+    def __init__(self, model_name: str = "openai", to_s3: bool = False):
+        """
+        Initialize the STT object
+
+        Args:
+            model_name (str): The name of the model to use
+            to_s3 (bool): If the audio file should be uploaded to S3
+        """
+
+        self.tts = None
+        self.model_name = model_name
+        self.to_s3 = to_s3
+
+    def handle_task(self, task: Task) -> Task:
+        """
+        Args:
+            task (Task): The task to handle
+
+        Returns:
+            The task with the result
+        """
+        TimeLogger.log_task(task, "start_text2speech")
+        text2speech_parameters = Text2SpeechParameters(**task.parameters)
+        logger.info(f"Text to speech: {text2speech_parameters.text}")
+
+        if self.model_name == "openai":
+            return self.text_to_speech_openai(
+                task=task, task_param=text2speech_parameters
+            )
+        TimeLogger.log_task(task, "end_text2speech")
+        return task
+
+    def text_to_speech_openai(
+        self, task: Task, task_param: Text2SpeechParameters
+    ) -> Task:
+        """
+        Convert the text to speech using OpenAI API
+        Args:
+            task (Task): The task to handle
+            task_param (Text2SpeechParameters): The parameters for the task
+
+        Returns:
+
+        """
+        result_profile = {}
+        latency_profile = {}
+        audio_file_path = DATA_DIR / "tts" / f"{task.id}.mp3"
+        # if folder does not exist, create it
+        audio_file_path.parent.mkdir(parents=True, exist_ok=True)
+        audio_file_path = audio_file_path.as_posix()
+
+        client = OpenAI()
+        with time_tracker("openai_tts", latency_profile, TrackType.MODEL.value):
+            response = client.audio.speech.create(
+                model="tts-1",
+                voice="alloy",
+                input=task_param.text,
+            )
+        with time_tracker("save_audio", latency_profile, TrackType.TRANSFER.value):
+            response.stream_to_file(audio_file_path)
+
+        result_profile["audio_file_path"] = audio_file_path.split("/")[-1]
+
+        if self.to_s3:
+            with time_tracker("to_s3", latency_profile, TrackType.TRANSFER.value):
+                self.upload_to_s3(audio_file_path, f"tts/{task.id}.mp3")
+
+        task.result_status = ResultStatus.completed.value
+        task.result_json.result_profile.update(result_profile)
+        task.result_json.latency_profile.update(latency_profile)
+        return task
+
+    @staticmethod
+    def upload_to_s3(file_path: str, s3_key: str):
+        """
+        Upload the file to S3
+        Args:
+            file_path (str): The path to the file
+            s3_key (str): The key to use in S3
+
+        """
+        s3_client = BOTO3_SESSION.client("s3")
+        s3_client.upload_file(
+            file_path,
+            S3_BUCKET,
+            s3_key,
+        )
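
The S3 upload above relies on a pre-configured BOTO3_SESSION and S3_BUCKET, presumably defined in the Agent's constants. A minimal standalone sketch of the same upload_file call, with placeholder file, bucket, and key names:

import boto3

s3_client = boto3.Session().client("s3")  # or a session with explicit credentials
s3_client.upload_file(
    "data/tts/example.mp3",    # local file (placeholder)
    "my-openomni-bucket",      # bucket name (placeholder)
    "tts/example.mp3",         # object key (placeholder)
)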
__init__(model_name='openai', to_s3=False)

Initialize the STT object

Parameters:
    model_name (str): The name of the model to use (default: 'openai')
    to_s3 (bool): If the audio file should be uploaded to S3 (default: False)

Source code in Agent/modules/text_to_speech/text2speech.py, lines 17-28
def __init__(self, model_name: str = "openai", to_s3: bool = False):
+    """
+    Initialize the STT object
+
+    Args:
+        model_name (str): The name of the model to use
+        to_s3 (bool): If the audio file should be uploaded to S3
+    """
+
+    self.tts = None
+    self.model_name = model_name
+    self.to_s3 = to_s3
handle_task(task)

Parameters:
    task (Task): The task to handle (required)

Returns:
    Task: The task with the result

Source code in Agent/modules/text_to_speech/text2speech.py, lines 30-47
def handle_task(self, task: Task) -> Task:
+    """
+    Args:
+        task (Task): The task to handle
+
+    Returns:
+        The task with the result
+    """
+    TimeLogger.log_task(task, "start_text2speech")
+    text2speech_parameters = Text2SpeechParameters(**task.parameters)
+    logger.info(f"Text to speech: {text2speech_parameters.text}")
+
+    if self.model_name == "openai":
+        return self.text_to_speech_openai(
+            task=task, task_param=text2speech_parameters
+        )
+    TimeLogger.log_task(task, "end_text2speech")
+    return task
text_to_speech_openai(task, task_param)

Convert the text to speech using OpenAI API

Args:
    task (Task): The task to handle
    task_param (Text2SpeechParameters): The parameters for the task

Returns:

Source code in Agent/modules/text_to_speech/text2speech.py, lines 49-87
def text_to_speech_openai(
+    self, task: Task, task_param: Text2SpeechParameters
+) -> Task:
+    """
+    Convert the text to speech using OpenAI API
+    Args:
+        task (Task): The task to handle
+        task_param (Text2SpeechParameters): The parameters for the task
+
+    Returns:
+
+    """
+    result_profile = {}
+    latency_profile = {}
+    audio_file_path = DATA_DIR / "tts" / f"{task.id}.mp3"
+    # if folder does not exist, create it
+    audio_file_path.parent.mkdir(parents=True, exist_ok=True)
+    audio_file_path = audio_file_path.as_posix()
+
+    client = OpenAI()
+    with time_tracker("openai_tts", latency_profile, TrackType.MODEL.value):
+        response = client.audio.speech.create(
+            model="tts-1",
+            voice="alloy",
+            input=task_param.text,
+        )
+    with time_tracker("save_audio", latency_profile, TrackType.TRANSFER.value):
+        response.stream_to_file(audio_file_path)
+
+    result_profile["audio_file_path"] = audio_file_path.split("/")[-1]
+
+    if self.to_s3:
+        with time_tracker("to_s3", latency_profile, TrackType.TRANSFER.value):
+            self.upload_to_s3(audio_file_path, f"tts/{task.id}.mp3")
+
+    task.result_status = ResultStatus.completed.value
+    task.result_json.result_profile.update(result_profile)
+    task.result_json.latency_profile.update(latency_profile)
+    return task
upload_to_s3(file_path, s3_key) (staticmethod)

Upload the file to S3

Args:
    file_path (str): The path to the file
    s3_key (str): The key to use in S3

Source code in Agent/modules/text_to_speech/text2speech.py, lines 89-103
@staticmethod
+def upload_to_s3(file_path: str, s3_key: str):
+    """
+    Upload the file to S3
+    Args:
+        file_path (str): The path to the file
+        s3_key (str): The key to use in S3
+
+    """
+    s3_client = BOTO3_SESSION.client("s3")
+    s3_client.upload_file(
+        file_path,
+        S3_BUCKET,
+        s3_key,
+    )
diff --git a/Sources/Agent/setup/index.html b/Sources/Agent/setup/index.html
new file mode 100644
index 00000000..7db4f67b

setup

diff --git a/Sources/Agent/storage/index.html b/Sources/Agent/storage/index.html
new file mode 100644
index 00000000..5786f95a

storage

This is the storage module.

It will include two processes:

  • One is to pull data down
  • Another is to upload data

A minimal sketch of the file-watching pattern this module builds on is shown below.
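
The push-side sync below watches the tts output folder with watchdog observers. A generic sketch of that watcher pattern, assuming watchdog is installed; the handler here only logs events and is not the project's sync handler.

import time
from watchdog.events import FileSystemEventHandler
from watchdog.observers import Observer

class LogHandler(FileSystemEventHandler):
    def on_created(self, event):
        print(f"new file: {event.src_path}")  # a real handler would push this file

observer = Observer()
observer.schedule(LogHandler(), "data/tts", recursive=True)  # placeholder path
observer.start()
try:
    while True:
        time.sleep(1)
except KeyboardInterrupt:
    observer.stop()
observer.join()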
StorageSolution

Source code in Agent/storage.py, lines 30-275
class StorageSolution:
+    def __init__(
+            self,
+            api_domain: str,
+            token: str,
+            input_source_dir: str = None,
+            output_dest_dir: str = None,
+            dest_password: str = None,
+    ):
+        self.api_domain = api_domain
+        self.token = token
+        self.api = API(domain=api_domain, token=token)
+        self.storage_solution = self.api.get_storage_solution()
+        self.input_source_dir = input_source_dir
+        self.output_dest_dir = output_dest_dir
+        self.dest_password = dest_password
+
+    def sync_push_data(self):
+        """
+        Sync the data to the storage
+        """
+        if self.storage_solution == "volume":
+            return
+        if self.storage_solution == "s3":
+            self.sync_push_s3()
+        if self.storage_solution == "local":
+            self.sync_push_local()
+        if self.storage_solution == "api":
+            self.sync_push_api()
+
+    def sync_push_local(self):
+        """
+        Sync the data to the local network
+        """
+        observer = Observer()
+        local_handler = LocalSyncHandler(
+            src_path=str(DATA_DIR / "tts"),
+            dest_path=self.output_dest_dir,
+            sshpass=self.dest_password,
+        )
+        observer.schedule(local_handler, str(DATA_DIR / "tts"), recursive=True)
+        observer.start()
+        try:
+            while True:
+                time.sleep(1)
+        except KeyboardInterrupt:
+            observer.stop()
+        observer.join()
+
+    @staticmethod
+    def sync_push_s3():
+        """
+        Sync the data to the s3
+        """
+        observer = Observer()
+        s3_handler = S3SyncHandler(s3_client=boto3.client("s3"))
+        observer.schedule(s3_handler, str(DATA_DIR / "tts"), recursive=True)
+        observer.start()
+        try:
+            while True:
+                time.sleep(1)
+        except KeyboardInterrupt:
+            observer.stop()
+        observer.join()
+
+    def sync_push_api(self):
+        """
+        Sync the data to the api
+        """
+        observer = Observer()
+        api_handler = APISyncHandler(self.api)
+        logger.info(str(DATA_DIR / "tts"))
+        observer.schedule(api_handler, str(DATA_DIR / "tts"), recursive=True)
+        observer.start()
+        try:
+            while True:
+                time.sleep(1)
+        except KeyboardInterrupt:
+            observer.stop()
+        observer.join()
+
+    def sync_pull_data(self):
+        """
+        If the storage solution is volume or local, the data is already accessible locally, so nothing needs to be synced
+        This will first call cloud to list all audio and video files
+        And then compare them with local ones
+        If there is any new files, download them
+
+        Returns:
+
+        """
+        if self.storage_solution == "volume":
+            return
+        if self.storage_solution == "local":
+            self.sync_pull_local()
+        if self.storage_solution == "s3":
+            self.sync_pull_s3()
+        if self.storage_solution == "api":
+            self.sync_pull_api()
+
+    def sync_pull_local(self):
+        """
+        Sync the data from the local network
+        directly run the rsync command
+        """
+        while True:
+            os.system(
+                "sshpass -p {} rsync -avz {} {}".format(self.dest_password, self.input_source_dir,
+                                                        str(CLIENT_DATA_FOLDER))
+            )
+            time.sleep(1)
+
+    def sync_pull_s3(self):
+        """
+        Sync the data from s3
+        """
+        pass
+
+    def sync_pull_api(self):
+        """
+        Sync the data from api
+        """
+        from_time = None
+        while True:
+            try:
+                logger.info(f"Syncing data from {from_time}")
+                files = self.api.list_files(from_time=from_time)
+                # set from time to now for the next sync in timestamp format
+                from_time = time.time()
+                self.download_data(files)
+            except Exception as e:
+                logger.error(f"Error syncing data: {e}")
+                logger.exception(e)
+            time.sleep(1)
+
+    def download_data(self, files):
+        """
+        Download the data from the cloud
+        Args:
+            files:
+
+        Returns:
+
+        """
+        audio_files = files.get("audio_files", [])
+        video_files = files.get("video_files", [])
+        logger.info(
+            f"Checking {len(audio_files)} audio files and {len(video_files)} video files"
+        )
+        for audio_file in audio_files:
+            dest_path = (
+                    CLIENT_DATA_FOLDER
+                    / "audio"
+                    / audio_file["uid"]
+                    / audio_file["audio_file"]
+            )
+            if not dest_path.exists():
+                # TODO: do the download here
+                logger.info(f"Downloading {audio_file['audio_file']} to {dest_path}")
+                dest_path.parent.mkdir(parents=True, exist_ok=True)
+                self.download_audio(audio_file["id"], dest_path)
+        for video_file in video_files:
+            dest_path = (
+                    CLIENT_DATA_FOLDER
+                    / "videos"
+                    / video_file["uid"]
+                    / video_file["video_file"]
+            )
+            if not dest_path.exists():
+                # TODO: do the download here
+                logger.info(f"Downloading {video_file['video_file']} to {dest_path}")
+                dest_path.parent.mkdir(parents=True, exist_ok=True)
+                self.download_video(video_file["id"], dest_path)
+
+    def download_audio(self, audio_file_id, dest_path: Path):
+        """
+        Download the audio file
+        Args:
+            audio_file_id (str): the audio file id
+            dest_path (Path): the destination path
+
+        Returns:
+
+        """
+        link_json = self.api.download_file_link(audio_file_id, "audio")
+        audio_url = link_json.get("audio_url", None)
+        if audio_url is None:
+            return
+
+        try:
+            r = requests.get(audio_url, stream=True)
+
+            if r.status_code != 404:
+                with open(dest_path, "wb") as f:
+                    for chunk in r.iter_content(chunk_size=1024):
+                        if chunk:
+                            f.write(chunk)
+            else:
+                logger.error(f"Error downloading audio file: {audio_url}, NOT FOUND")
+        except Exception as e:
+            logger.error(f"Error downloading audio file: {e}")
+
+    def download_video(self, video_file_id, dest_path: Path):
+        """
+        Download the video file
+        Args:
+            video_file_id (str): the video file id
+            dest_path (Path): the destination path
+
+        Returns:
+
+        """
+        link_json = self.api.download_file_link(video_file_id, "video")
+        video_url = link_json.get("video_url", None)
+        frames = link_json.get("frames", None)
+        logger.info(f"video_url: {video_url}, frames: {frames}")
+        if video_url is not None:
+            try:
+                r = requests.get(video_url, stream=True)
+                if r.status_code != 404:
+                    with open(dest_path, "wb") as f:
+                        for chunk in r.iter_content(chunk_size=1024):
+                            if chunk:
+                                f.write(chunk)
+                else:
+                    logger.error(
+                        f"Error downloading video file: {video_url}, NOT FOUND"
+                    )
+            except Exception as e:
+                logger.error(f"Error downloading video file: {e}")
+
+        for frame_url in frames:
+            # take the last path segment of the frame URL as the file name
+
+            frame_path = dest_path.parent / "frames" / frame_url.rsplit("/", 3)[-1]
+            logger.info(f"Downloading frame {frame_url} to {frame_path}")
+            if frame_path.exists():
+                continue
+            try:
+                r = requests.get(frame_url, stream=True)
+                with open(frame_path, "wb") as f:
+                    for chunk in r.iter_content(chunk_size=1024):
+                        if chunk:
+                            f.write(chunk)
+            except Exception as e:
+                logger.error(f"Error downloading frame file: {e}")
+
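Note that the download helpers above only treat HTTP 404 as a failure, so other error responses (403, 500, ...) would still be written to disk as if they were media files. Below is a minimal, hedged sketch of a stricter variant using only requests and pathlib; the helper name fetch_to_path is illustrative and not part of the project source.

from pathlib import Path

import requests


def fetch_to_path(url: str, dest_path: Path, chunk_size: int = 1024) -> bool:
    # Stream a remote file to dest_path, rejecting any non-2xx response.
    try:
        with requests.get(url, stream=True, timeout=30) as r:
            r.raise_for_status()  # fails on 4xx/5xx, not only 404
            dest_path.parent.mkdir(parents=True, exist_ok=True)
            with open(dest_path, "wb") as f:
                for chunk in r.iter_content(chunk_size=chunk_size):
                    if chunk:
                        f.write(chunk)
        return True
    except requests.RequestException as e:
        print(f"Error downloading {url}: {e}")
        return False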
+
+ + + +
+ + + + + + + + + +
+ + +

+ download_audio(audio_file_id, dest_path) + +

+ + +
+ +

Download the audio file +Args: + audio_file_id (str): the audio file id + dest_path (Path): the destination path

+

Returns:

+ +
+ Source code in Agent/storage.py +
def download_audio(self, audio_file_id, dest_path: Path):
+    """
+    Download the audio file
+    Args:
+        audio_file_id (str): the audio file id
+        dest_path (Path): the destination path
+
+    Returns:
+
+    """
+    link_json = self.api.download_file_link(audio_file_id, "audio")
+    audio_url = link_json.get("audio_url", None)
+    if audio_url is None:
+        return
+
+    try:
+        r = requests.get(audio_url, stream=True)
+
+        if r.status_code != 404:
+            with open(dest_path, "wb") as f:
+                for chunk in r.iter_content(chunk_size=1024):
+                    if chunk:
+                        f.write(chunk)
+        else:
+            logger.error(f"Error downloading audio file: {audio_url}, NOT FOUND")
+    except Exception as e:
+        logger.error(f"Error downloading audio file: {e}")
+
+
+
+ +
+ +
+ + +

+ download_data(files) + +

+ + +
+ +

Download the data from the cloud +Args: + files:

+

Returns:

+ +
+ Source code in Agent/storage.py +
def download_data(self, files):
+    """
+    Download the data from the cloud
+    Args:
+        files:
+
+    Returns:
+
+    """
+    audio_files = files.get("audio_files", [])
+    video_files = files.get("video_files", [])
+    logger.info(
+        f"Checking {len(audio_files)} audio files and {len(video_files)} video files"
+    )
+    for audio_file in audio_files:
+        dest_path = (
+                CLIENT_DATA_FOLDER
+                / "audio"
+                / audio_file["uid"]
+                / audio_file["audio_file"]
+        )
+        if not dest_path.exists():
+            # TODO: do the download here
+            logger.info(f"Downloading {audio_file['audio_file']} to {dest_path}")
+            dest_path.parent.mkdir(parents=True, exist_ok=True)
+            self.download_audio(audio_file["id"], dest_path)
+    for video_file in video_files:
+        dest_path = (
+                CLIENT_DATA_FOLDER
+                / "videos"
+                / video_file["uid"]
+                / video_file["video_file"]
+        )
+        if not dest_path.exists():
+            # TODO: do the download here
+            logger.info(f"Downloading {video_file['video_file']} to {dest_path}")
+            dest_path.parent.mkdir(parents=True, exist_ok=True)
+            self.download_video(video_file["id"], dest_path)
+
+
+
+ +
+ +
+ + +

+ download_video(video_file_id, dest_path) + +

+ + +
+ +

Download the video file +Args: + video_file_id (str): the video file id + dest_path (Path): the destination path

+

Returns:

+ +
+ Source code in Agent/storage.py +
def download_video(self, video_file_id, dest_path: Path):
+    """
+    Download the video file
+    Args:
+        video_file_id (str): the video file id
+        dest_path (Path): the destination path
+
+    Returns:
+
+    """
+    link_json = self.api.download_file_link(video_file_id, "video")
+    video_url = link_json.get("video_url", None)
+    frames = link_json.get("frames", None)
+    logger.info(f"video_url: {video_url}, frames: {frames}")
+    if video_url is not None:
+        try:
+            r = requests.get(video_url, stream=True)
+            if r.status_code != 404:
+                with open(dest_path, "wb") as f:
+                    for chunk in r.iter_content(chunk_size=1024):
+                        if chunk:
+                            f.write(chunk)
+            else:
+                logger.error(
+                    f"Error downloading video file: {video_url}, NOT FOUND"
+                )
+        except Exception as e:
+            logger.error(f"Error downloading video file: {e}")
+
+    for frame_url in frames:
+        # take the last path segment of the frame URL as the file name
+
+        frame_path = dest_path.parent / "frames" / frame_url.rsplit("/", 3)[-1]
+        logger.info(f"Downloading frame {frame_url} to {frame_path}")
+        if frame_path.exists():
+            continue
+        try:
+            r = requests.get(frame_url, stream=True)
+            with open(frame_path, "wb") as f:
+                for chunk in r.iter_content(chunk_size=1024):
+                    if chunk:
+                        f.write(chunk)
+        except Exception as e:
+            logger.error(f"Error downloading frame file: {e}")
+
+
+
+ +
+ +
+ + +

+ sync_pull_api() + +

+ + +
+ +

Sync the data from the API

+ +
+ Source code in Agent/storage.py +
def sync_pull_api(self):
+    """
+    Sync the data from the API
+    """
+    from_time = None
+    while True:
+        try:
+            logger.info(f"Syncing data from {from_time}")
+            files = self.api.list_files(from_time=from_time)
+            # set from time to now for the next sync in timestamp format
+            from_time = time.time()
+            self.download_data(files)
+        except Exception as e:
+            logger.error(f"Error syncing data: {e}")
+            logger.exception(e)
+        time.sleep(1)
+
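One subtlety in the loop above: from_time is captured after list_files returns, so files created on the server between the listing and the timestamp could be skipped on the next pass. A hedged sketch of the same polling loop that takes the timestamp before listing (at the cost of occasionally re-checking a file); poll_api is an illustrative name, not project code.

import time


def poll_api(api, download_data, interval: float = 1.0):
    # api.list_files and download_data follow the signatures shown on this page.
    from_time = None
    while True:
        try:
            next_from_time = time.time()  # capture before listing
            files = api.list_files(from_time=from_time)
            if files is not None:
                download_data(files)
            from_time = next_from_time
        except Exception as e:
            print(f"Error syncing data: {e}")
        time.sleep(interval)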
+
+
+ +
+ +
+ + +

+ sync_pull_data() + +

+ + +
+ +

If the storage solution is volume or local, the data is accessible locally, so there is no need to worry about it. +Otherwise this will first call the cloud to list all audio and video files, +then compare them with the local ones, +and download any new files.

+

Returns:

+ +
+ Source code in Agent/storage.py +
def sync_pull_data(self):
+    """
+    If the storage solution is volume or local, the data is accessible locally, so there is no need to worry about it.
+    Otherwise this will first call the cloud to list all audio and video files,
+    then compare them with the local ones,
+    and download any new files.
+
+    Returns:
+
+    """
+    if self.storage_solution == "volume":
+        return
+    if self.storage_solution == "local":
+        self.sync_pull_local()
+    if self.storage_solution == "s3":
+        self.sync_pull_s3()
+    if self.storage_solution == "api":
+        self.sync_pull_api()
+
+
+
+ +
+ +
+ + +

+ sync_pull_local() + +

+ + +
+ +

Sync the data from the local network +directly run the rsync command

+ +
+ Source code in Agent/storage.py +
def sync_pull_local(self):
+    """
+    Sync the data from the local network
+    directly run the rsync command
+    """
+    while True:
+        os.system(
+            "sshpass -p {} rsync -avz {} {}".format(self.dest_password, self.input_source_dir,
+                                                    str(CLIENT_DATA_FOLDER))
+        )
+        time.sleep(1)
+
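Since os.system interpolates the password into a shell string, the command is exposed to the process list and to shell-quoting issues. A hedged sketch of a single rsync pull using subprocess.run with an argument list (mirroring what LocalSyncHandler already does for pushes); pull_once is an illustrative helper, not part of the project.

import subprocess


def pull_once(dest_password: str, input_source_dir: str, client_data_folder: str) -> int:
    # One rsync pull with arguments passed as a list, so no shell parsing is involved.
    result = subprocess.run(
        ["sshpass", "-p", dest_password, "rsync", "-avz", input_source_dir, client_data_folder],
        check=False,
    )
    return result.returncode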
+
+
+ +
+ +
+ + +

+ sync_pull_s3() + +

+ + +
+ +

Sync the data from s3

+ +
+ Source code in Agent/storage.py +
def sync_pull_s3(self):
+    """
+    Sync the data from s3
+    """
+    pass
+
+
+
+ +
+ +
+ + +

+ sync_push_api() + +

+ + +
+ +

Sync the data to the api

+ +
+ Source code in Agent/storage.py +
def sync_push_api(self):
+    """
+    Sync the data to the api
+    """
+    observer = Observer()
+    api_handler = APISyncHandler(self.api)
+    logger.info(str(DATA_DIR / "tts"))
+    observer.schedule(api_handler, str(DATA_DIR / "tts"), recursive=True)
+    observer.start()
+    try:
+        while True:
+            time.sleep(1)
+    except KeyboardInterrupt:
+        observer.stop()
+    observer.join()
+
+
+
+ +
+ +
+ + +

+ sync_push_data() + +

+ + +
+ +

Sync the data to the storage

+ +
+ Source code in Agent/storage.py +
def sync_push_data(self):
+    """
+    Sync the data to the storage
+    """
+    if self.storage_solution == "volume":
+        return
+    if self.storage_solution == "s3":
+        self.sync_push_s3()
+    if self.storage_solution == "local":
+        self.sync_push_local()
+    if self.storage_solution == "api":
+        self.sync_push_api()
+
+
+
+ +
+ +
+ + +

+ sync_push_local() + +

+ + +
+ +

Sync the data to the local network

+ +
+ Source code in Agent/storage.py +
def sync_push_local(self):
+    """
+    Sync the data to the local network
+    """
+    observer = Observer()
+    local_handler = LocalSyncHandler(
+        src_path=str(DATA_DIR / "tts"),
+        dest_path=self.output_dest_dir,
+        sshpass=self.dest_password,
+    )
+    observer.schedule(local_handler, str(DATA_DIR / "tts"), recursive=True)
+    observer.start()
+    try:
+        while True:
+            time.sleep(1)
+    except KeyboardInterrupt:
+        observer.stop()
+    observer.join()
+
+
+
+ +
+ +
+ + +

+ sync_push_s3() + + + staticmethod + + +

+ + +
+ +

Sync the data to the s3

+ +
+ Source code in Agent/storage.py +
@staticmethod
+def sync_push_s3():
+    """
+    Sync the data to the s3
+    """
+    observer = Observer()
+    s3_handler = S3SyncHandler(s3_client=boto3.client("s3"))
+    observer.schedule(s3_handler, str(DATA_DIR / "tts"), recursive=True)
+    observer.start()
+    try:
+        while True:
+            time.sleep(1)
+    except KeyboardInterrupt:
+        observer.stop()
+    observer.join()
+
+
+
+ +
+ + + +
+ +
+ +
+ + + + +
+ +
+ +
+ + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/Sources/Agent/utils/api/index.html b/Sources/Agent/utils/api/index.html new file mode 100644 index 00000000..587f8226 --- /dev/null +++ b/Sources/Agent/utils/api/index.html @@ -0,0 +1,5921 @@ + + + + + + + + + + + + + + + + + + + + + + + + + API - OpenOmni Framework by AI4WA + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + +

API

+ +
+ + + + +
+ + + +
+ + + + + + + + +
+ + + +

+ API + + +

+ + +
+ + +

This is the class to communicate with the API component

+ +
+ Source code in Agent/utils/api.py +
class API:
+    """
+    This is the class to communicate with the API component
+    """
+
+    def __init__(
+        self,
+        domain: str = API_DOMAIN,
+        token: str = "",
+        uuid: str = "",
+        task_name: str = "llm",
+    ):
+        """
+        Init API class to communicate with the API
+        Args:
+            domain (str): The domain of the API
+            token (str): The token to authenticate
+            uuid (str): The UUID of the worker
+            task_name (str): The task type of the worker
+        """
+        self.domain = domain
+        self.token = token
+        self.task_name = task_name
+        self.uuid = uuid
+        self.mac_address = getmac.get_mac_address()
+        self.ip_address = self.get_local_ip()
+
+    def verify_token(self) -> bool:
+        try:
+            url = f"{self.domain}/authenticate/api/token/verify/"
+            r = requests.post(
+                url,
+                headers={"Authorization": f"Token {self.token}"},
+                data={"token": self.token},
+            )
+            logger.info(f"POST {url} {r.status_code}")
+            logger.info(r.json())
+            if r.status_code != 200:
+                return False
+            return True
+        except Exception as e:
+            logger.error(f"Error verifying token: {e}")
+            return False
+
+    def get_available_models(self):
+        """
+        Get the available LLM models from the API
+        Returns:
+
+        """
+        url = f"{self.domain}/llm/config"
+        r = requests.get(url, headers={"Authorization": f"Token {self.token}"})
+        logger.info(f"GET {url} {r.status_code}")
+        return r.json()
+
+    def get_task(self):
+        """
+        Get the task from the API
+        Returns:
+
+        """
+        logger.debug(self.task_name)
+        url = f"{self.domain}/queue_task/task/{self.task_name}/"
+        r = requests.get(url, headers={"Authorization": f"Token {self.token}"})
+        logger.info(f"GET {url} {r.status_code}")
+        logger.info(r.text)
+        if r.status_code != 200:
+            return None
+        return r.json()
+
+    def post_task_result(
+        self,
+        task: Task,
+    ):
+        """
+        Post the task result to the API
+        Args:
+            task (Task): The task to post the result
+
+        Returns:
+
+        """
+        url = f"{self.domain}/queue_task/{task.id}/update_result/"
+        r = requests.post(
+            url,
+            data=task.json(),
+            headers={
+                "Authorization": f"Token {self.token}",
+                "Content-Type": "application/json",
+            },
+        )
+        logger.info(f"POST {url} {r.status_code}")
+        logger.info(r.text)
+        if r.status_code != 200:
+            return None
+        return r.json()
+
+    def register_or_update_worker(self):
+        """
+        Register or update the worker
+        So we can know whether the worker is alive or not
+        """
+        try:
+            url = f"{self.domain}/queue_task/worker/"
+            r = requests.post(
+                url,
+                data={
+                    "uuid": self.uuid,
+                    "mac_address": self.mac_address,
+                    "ip_address": self.ip_address,
+                    "task_name": self.task_name,
+                },
+                headers={"Authorization": f"Token {self.token}"},
+            )
+            logger.info(f"POST {url} {r.status_code}")
+            # logger.info(r.text)
+            return r.json()
+        except Exception as e:
+            logger.error(f"Error registering worker: {e}")
+
+    @staticmethod
+    def get_local_ip() -> str:
+        """
+        Get the local IP address
+        Returns:
+            str: The local IP address
+
+        """
+        # Create a socket object
+        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
+        try:
+            # doesn't matter if the address is reachable
+            s.connect(("10.255.255.255", 1))
+            ip = s.getsockname()[0]
+        except Exception as e:
+            logger.error(f"Error getting local IP: {e}")
+            ip = "127.0.0.1"
+        finally:
+            s.close()
+        return ip
+
+    def get_storage_solution(self):
+        """
+        Get the storage solution from the API
+        Returns:
+
+        """
+        url = f"{self.domain}/hardware/storage_solution/"
+        r = requests.get(
+            url, headers={"Authorization": f"Token {self.token}"}, timeout=30
+        )
+        logger.info(f"GET {url} {r.status_code}")
+        if r.status_code != 200:
+            return None
+        data = r.json()
+        logger.info(data)
+        return data.get("storage_solution", "volume")
+
+    def upload_file(
+        self,
+        source_file: str,
+        dest_path: str,
+    ):
+        """
+        Upload the file to the API
+        """
+        url = f"{self.domain}/hardware/upload_file/"
+        files = {"file": open(source_file, "rb")}
+        data = {
+            "dest_path": dest_path,
+        }
+        r = requests.post(
+            url,
+            files=files,
+            data=data,
+            headers={"Authorization": f"Token {self.token}"},
+            timeout=30,
+        )
+        logger.info(f"POST {url} {r.status_code}")
+        if r.status_code != 200:
+            return None
+        return True
+
+    def list_files(self, from_time=None):
+        """
+        List the files from the API
+        """
+        url = f"{self.domain}/hardware/list_files/"
+        data = {
+            "from_time": from_time,
+        }
+        r = requests.get(
+            url,
+            data=data,
+            headers={"Authorization": f"Token {self.token}"},
+        )
+        logger.info(f"GET {url} {r.status_code}")
+        if r.status_code != 200:
+            return None
+        return r.json()
+
+    def download_file_link(self, file_id, file_type):
+        """
+        Get the download file link
+        """
+        url = f"{self.domain}/hardware/download_file_link/"
+        data = {
+            "file_id": file_id,
+            "file_type": file_type,
+        }
+
+        r = requests.get(
+            url,
+            data=data,
+            headers={"Authorization": f"Token {self.token}"},
+        )
+        logger.info(f"GET {url} {r.status_code}")
+        if r.status_code != 200:
+            return None
+        return r.json()
+
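A minimal, hedged usage sketch of this Agent-side API client; the domain, token and UUID values are placeholders, and the import path is assumed to be utils.api inside the Agent package.

from utils.api import API  # assumed module path within Agent

api = API(domain="http://localhost:8000", token="<api-token>", uuid="worker-1", task_name="llm")

if api.verify_token():
    api.register_or_update_worker()   # heartbeat so the API knows this worker is alive
    task = api.get_task()             # next queued task as JSON, or None when the queue is empty
    if task is not None:
        print(task)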
+
+ + + +
+ + + + + + + + + +
+ + +

+ __init__(domain=API_DOMAIN, token='', uuid='', task_name='llm') + +

+ + +
+ +

Init API class to communicate with the API +Args: + domain (str): The domain of the API + token (str): The token to authenticate + uuid (str): The UUID of the worker + task_name (str): The task type of the worker

+ +
+ Source code in Agent/utils/api.py +
def __init__(
+    self,
+    domain: str = API_DOMAIN,
+    token: str = "",
+    uuid: str = "",
+    task_name: str = "llm",
+):
+    """
+    Init API class to communicate with the API
+    Args:
+        domain (str): The domain of the API
+        token (str): The token to authenticate
+        uuid (str): The UUID of the worker
+        task_name (str): The task type of the worker
+    """
+    self.domain = domain
+    self.token = token
+    self.task_name = task_name
+    self.uuid = uuid
+    self.mac_address = getmac.get_mac_address()
+    self.ip_address = self.get_local_ip()
+
+
+
+ +
+ +
+ + + + + +
+ +

Get the download file link

+ +
+ Source code in Agent/utils/api.py +
def download_file_link(self, file_id, file_type):
+    """
+    Get the download file link
+    """
+    url = f"{self.domain}/hardware/download_file_link/"
+    data = {
+        "file_id": file_id,
+        "file_type": file_type,
+    }
+
+    r = requests.get(
+        url,
+        data=data,
+        headers={"Authorization": f"Token {self.token}"},
+    )
+    logger.info(f"GET {url} {r.status_code}")
+    if r.status_code != 200:
+        return None
+    return r.json()
+
+
+
+ +
+ +
+ + +

+ get_available_models() + +

+ + +
+ +

Get the available LLM models from the API +Returns:

+ +
+ Source code in Agent/utils/api.py +
def get_available_models(self):
+    """
+    Get the available LLM models from the API
+    Returns:
+
+    """
+    url = f"{self.domain}/llm/config"
+    r = requests.get(url, headers={"Authorization": f"Token {self.token}"})
+    logger.info(f"GET {url} {r.status_code}")
+    return r.json()
+
+
+
+ +
+ +
+ + +

+ get_local_ip() + + + staticmethod + + +

+ + +
+ +

Get the local IP address +Returns: + str: The local IP address

+ +
+ Source code in Agent/utils/api.py +
@staticmethod
+def get_local_ip() -> str:
+    """
+    Get the local IP address
+    Returns:
+        str: The local IP address
+
+    """
+    # Create a socket object
+    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
+    try:
+        # doesn't matter if the address is reachable
+        s.connect(("10.255.255.255", 1))
+        ip = s.getsockname()[0]
+    except Exception as e:
+        logger.error(f"Error getting local IP: {e}")
+        ip = "127.0.0.1"
+    finally:
+        s.close()
+    return ip
+
+
+
+ +
+ +
+ + +

+ get_storage_solution() + +

+ + +
+ +

Get the storage solution from the API +Returns:

+ +
+ Source code in Agent/utils/api.py +
def get_storage_solution(self):
+    """
+    Get the storage solution from the API
+    Returns:
+
+    """
+    url = f"{self.domain}/hardware/storage_solution/"
+    r = requests.get(
+        url, headers={"Authorization": f"Token {self.token}"}, timeout=30
+    )
+    logger.info(f"GET {url} {r.status_code}")
+    if r.status_code != 200:
+        return None
+    data = r.json()
+    logger.info(data)
+    return data.get("storage_solution", "volume")
+
+
+
+ +
+ +
+ + +

+ get_task() + +

+ + +
+ +

Get the task from the API +Returns:

+ +
+ Source code in Agent/utils/api.py +
def get_task(self):
+    """
+    Get the task from the API
+    Returns:
+
+    """
+    logger.debug(self.task_name)
+    url = f"{self.domain}/queue_task/task/{self.task_name}/"
+    r = requests.get(url, headers={"Authorization": f"Token {self.token}"})
+    logger.info(f"GET {url} {r.status_code}")
+    logger.info(r.text)
+    if r.status_code != 200:
+        return None
+    return r.json()
+
+
+
+ +
+ +
+ + +

+ list_files(from_time=None) + +

+ + +
+ +

List the files from the API

+ +
+ Source code in Agent/utils/api.py +
def list_files(self, from_time=None):
+    """
+    List the files from the API
+    """
+    url = f"{self.domain}/hardware/list_files/"
+    data = {
+        "from_time": from_time,
+    }
+    r = requests.get(
+        url,
+        data=data,
+        headers={"Authorization": f"Token {self.token}"},
+    )
+    logger.info(f"GET {url} {r.status_code}")
+    if r.status_code != 200:
+        return None
+    return r.json()
+
+
+
+ +
+ +
+ + +

+ post_task_result(task) + +

+ + +
+ +

Post the task result to the API +Args: + task (Task): The task to post the result

+

Returns:

+ +
+ Source code in Agent/utils/api.py +
def post_task_result(
+    self,
+    task: Task,
+):
+    """
+    Post the task result to the API
+    Args:
+        task (Task): The task to post the result
+
+    Returns:
+
+    """
+    url = f"{self.domain}/queue_task/{task.id}/update_result/"
+    r = requests.post(
+        url,
+        data=task.json(),
+        headers={
+            "Authorization": f"Token {self.token}",
+            "Content-Type": "application/json",
+        },
+    )
+    logger.info(f"POST {url} {r.status_code}")
+    logger.info(r.text)
+    if r.status_code != 200:
+        return None
+    return r.json()
+
+
+
+ +
+ +
+ + +

+ register_or_update_worker() + +

+ + +
+ +

Register or update the worker +So we can know whether the worker is alive or not

+ +
+ Source code in Agent/utils/api.py +
def register_or_update_worker(self):
+    """
+    Register or update the worker
+    So we can know whether the worker is alive or not
+    """
+    try:
+        url = f"{self.domain}/queue_task/worker/"
+        r = requests.post(
+            url,
+            data={
+                "uuid": self.uuid,
+                "mac_address": self.mac_address,
+                "ip_address": self.ip_address,
+                "task_name": self.task_name,
+            },
+            headers={"Authorization": f"Token {self.token}"},
+        )
+        logger.info(f"POST {url} {r.status_code}")
+        # logger.info(r.text)
+        return r.json()
+    except Exception as e:
+        logger.error(f"Error registering worker: {e}")
+
+
+
+ +
+ +
+ + +

+ upload_file(source_file, dest_path) + +

+ + +
+ +

Upload the file to the API

+ +
+ Source code in Agent/utils/api.py +
def upload_file(
+    self,
+    source_file: str,
+    dest_path: str,
+):
+    """
+    Upload the file to the API
+    """
+    url = f"{self.domain}/hardware/upload_file/"
+    files = {"file": open(source_file, "rb")}
+    data = {
+        "dest_path": dest_path,
+    }
+    r = requests.post(
+        url,
+        files=files,
+        data=data,
+        headers={"Authorization": f"Token {self.token}"},
+        timeout=30,
+    )
+    logger.info(f"POST {url} {r.status_code}")
+    if r.status_code != 200:
+        return None
+    return True
+
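Note that upload_file opens the source file without ever closing the handle explicitly. Below is a hedged sketch of an equivalent call that scopes the handle with a context manager; it is a standalone helper for illustration, not the project's implementation.

import requests


def upload_file_safely(domain: str, token: str, source_file: str, dest_path: str) -> bool:
    # Same endpoint and payload as upload_file above, but the file handle is closed deterministically.
    url = f"{domain}/hardware/upload_file/"
    with open(source_file, "rb") as fh:
        r = requests.post(
            url,
            files={"file": fh},
            data={"dest_path": dest_path},
            headers={"Authorization": f"Token {token}"},
            timeout=30,
        )
    return r.status_code == 200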
+
+
+ +
+ + + +
+ +
+ +
+ + + + +
+ +
+ +
+ + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/Sources/Agent/utils/aws/index.html b/Sources/Agent/utils/aws/index.html new file mode 100644 index 00000000..d12277cd --- /dev/null +++ b/Sources/Agent/utils/aws/index.html @@ -0,0 +1,4635 @@ + + + + + + + + + + + + + + + + + + + + + + + + + AWS - OpenOmni Framework by AI4WA + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

AWS

+ +
+ + + + +
+ + + +
+ + + + + + + + + + + +
+ +
+ +
+ + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/Sources/Agent/utils/constants/index.html b/Sources/Agent/utils/constants/index.html new file mode 100644 index 00000000..9bc8c6ff --- /dev/null +++ b/Sources/Agent/utils/constants/index.html @@ -0,0 +1,4691 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Constants - OpenOmni Framework by AI4WA + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Constants

+ +
+ + + + +
+ + + +
+ + + + + + + +
+ + + +

+ NORMAL_MODELS = [BERT] + + + module-attribute + + +

+ + +
+ +
LLM_MODEL_DIR = BASE_DIR / "llm" / "models"
+
+API_DOMAIN = "http://localhost:8000"  # default domain
+
+# model types
+HF_LLAMA = "HuggingFace"
+MT_LLAMA = "llama.cpp"
+MT_API = "api"
+MT_CHATGLM = "chatglm.cpp"
+MODEL_TYPES = [HF_LLAMA, MT_LLAMA, MT_API, MT_CHATGLM]
+
+# model names
+MN_LLAMA2 = "llama2"
+MN_GEMMA = "gemma"
+
+BERT = "bert"
+NORMAL_MODELS = [BERT]
+
+
+ +
+ + + + + +
+ +
+ +
+ + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/Sources/Agent/utils/get_logger/index.html b/Sources/Agent/utils/get_logger/index.html new file mode 100644 index 00000000..9b1db6fb --- /dev/null +++ b/Sources/Agent/utils/get_logger/index.html @@ -0,0 +1,4719 @@ + + + + + + + + + + + + + + + + + + + + + + + + + GetLogger - OpenOmni Framework by AI4WA + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

GetLogger

+ +
+ + + + +
+ + + +
+ + + + + + + + + +
+ + +

+ get_logger(logger_name=None, stream=True) + +

+ + +
+ +

init the logger, give it proper format, log them both in terminal stream and file

+ +
+ Source code in Agent/utils/get_logger.py +
def get_logger(logger_name: Optional[str] = None, stream: bool = True):
+    """
+    init the logger, give it proper format, log them both in terminal stream and file
+    """
+    logging.basicConfig(
+        format="%(name)s: %(asctime)s,%(msecs)d %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s",
+        datefmt="%Y-%m-%d:%H:%M:%S",
+        level=logging.INFO,
+    )
+
+    logger = logging.getLogger(logger_name)
+    logger.setLevel(logging.INFO)
+    logger.propagate = False
+    formatter = logging.Formatter(
+        "CLIENT: %(name)s | %(asctime)s,%(msecs)d %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s",
+    )
+    if not logger.hasHandlers() and stream:
+        stdout_handler = logging.StreamHandler()
+        stdout_handler.setFormatter(formatter)
+        stdout_handler.setLevel(logging.INFO)
+        logger.addHandler(stdout_handler)
+
+    return logger
+
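A short usage sketch, assuming the module is importable as utils.get_logger within the Agent package:

from utils.get_logger import get_logger  # assumed import path

logger = get_logger(__name__)
logger.info("Agent started")                        # goes to the terminal stream handler
quiet_logger = get_logger("metrics", stream=False)  # logger without the extra stream handler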
+
+
+ +
+ + + +
+ +
+ +
+ + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/Sources/Agent/utils/storage/api_sync_handler/index.html b/Sources/Agent/utils/storage/api_sync_handler/index.html new file mode 100644 index 00000000..d5d3b624 --- /dev/null +++ b/Sources/Agent/utils/storage/api_sync_handler/index.html @@ -0,0 +1,4757 @@ + + + + + + + + + + + + + + + + + + + + + + + + + APISyncHandler - OpenOmni Framework by AI4WA + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

APISyncHandler

+ +
+ + + + +
+ + + +
+ + + + + + + + +
+ + + +

+ APISyncHandler + + +

+ + +
+

+ Bases: FileSystemEventHandler

+ + +

Sync the files to the API server when they are created, modified, moved or deleted

+ +
+ Source code in Agent/utils/storage/api_sync_handler.py +
class APISyncHandler(FileSystemEventHandler):
+    """
+    Sync the files to the API server when they are created, modified, moved or deleted
+    """
+
+    def __init__(self, api: API):
+        super().__init__()
+        self.api = api
+
+    def on_any_event(self, event):
+        if event.is_directory:
+            return None
+
+        elif event.event_type in ("created", "modified", "moved", "deleted"):
+            # print(f"Event type: {event.event_type} - Path: {event.src_path}")
+            # only process .mp4, .wav and .mp3 files
+            if event.src_path.split("/")[-1].split(".")[-1] not in [
+                "mp4",
+                "wav",
+                "mp3",
+            ]:
+                return None
+            try:
+                self.api.upload_file(
+                    event.src_path,
+                    f"Responder/{event.src_path.split(DATA_DIR.as_posix())[1].strip('/')}",
+                )
+                logger.info(f"Uploaded file to server: {event.src_path}")
+            except Exception as e:
+                logger.error(f"Error uploading file to s3: {e}")
+
+
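Both sync handlers filter file extensions by hand inside on_any_event. As an alternative sketch (not the project's implementation), watchdog's PatternMatchingEventHandler can do the filtering declaratively:

from watchdog.events import PatternMatchingEventHandler


class MediaOnlyHandler(PatternMatchingEventHandler):
    # Only .mp4/.wav/.mp3 events reach on_any_event; directory events are ignored.
    def __init__(self):
        super().__init__(patterns=["*.mp4", "*.wav", "*.mp3"], ignore_directories=True)

    def on_any_event(self, event):
        print(f"{event.event_type}: {event.src_path}")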
+ + + +
+ + + + + + + + + + + +
+ +
+ +
+ + + + +
+ +
+ +
+ + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/Sources/Agent/utils/storage/local_sync_handler/index.html b/Sources/Agent/utils/storage/local_sync_handler/index.html new file mode 100644 index 00000000..3ded220d --- /dev/null +++ b/Sources/Agent/utils/storage/local_sync_handler/index.html @@ -0,0 +1,5024 @@ + + + + + + + + + + + + + + + + + + + + + + + + + LocalSyncHandler - OpenOmni Framework by AI4WA + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

LocalSyncHandler

+ +
+ + + + +
+ + + +
+ + + + + + + + +
+ + + +

+ LocalSyncHandler + + +

+ + +
+

+ Bases: FileSystemEventHandler

+ + +

Sync the files to disk when they are created, modified, moved or deleted

+ +
+ Source code in Agent/utils/storage/local_sync_handler.py +
class LocalSyncHandler(FileSystemEventHandler):
+    """
+    Sync the files to disk when they are created, modified, moved or deleted
+    """
+
+    def __init__(self, src_path: str, dest_path: str, sshpass: str):
+        """
+
+        Args:
+            src_path (str): The source path to sync
+            dest_path (str): The destination path to sync
+            sshpass (str): The password to ssh
+        """
+        super().__init__()
+        self.src_path = src_path
+        self.dest_path = dest_path
+        self.sshpass = sshpass
+
+    def on_any_event(self, event):
+        """
+        Sync the files to disk when they are created, modified, moved or deleted
+        Args:
+            event:
+
+        Returns:
+
+        """
+        if event.is_directory:
+            return None
+        else:
+            if self.sshpass:
+                subprocess.call(
+                    [
+                        "sshpass",
+                        "-p",
+                        self.sshpass,
+                        "rsync",
+                        "-avz",
+                        "--delete",
+                        self.src_path,
+                        self.dest_path,
+                    ]
+                )
+            else:
+                # we can set up the authentication first, then we can use the rsync command
+                subprocess.call(
+                    ["rsync", "-avz", "--delete", self.src_path, self.dest_path]
+                )
+
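The comment in the else branch hints at setting up SSH authentication so that sshpass is no longer needed. A hedged sketch of that key-based variant; rsync_with_key is an illustrative helper and the identity file path is a placeholder supplied by the caller.

import subprocess


def rsync_with_key(src_path: str, dest_path: str, identity_file: str) -> int:
    # Push over SSH with a key instead of a password; -e overrides the remote shell command.
    result = subprocess.run(
        [
            "rsync",
            "-avz",
            "--delete",
            "-e", f"ssh -i {identity_file} -o StrictHostKeyChecking=accept-new",
            src_path,
            dest_path,
        ],
        check=False,
    )
    return result.returncode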
+
+ + + +
+ + + + + + + + + +
+ + +

+ __init__(src_path, dest_path, sshpass) + +

+ + +
+ + + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
src_path + str + +
+

The source path to sync

+
+
+ required +
dest_path + str + +
+

The destination path to sync

+
+
+ required +
sshpass + str + +
+

The password to ssh

+
+
+ required +
+ +
+ Source code in Agent/utils/storage/local_sync_handler.py +
def __init__(self, src_path: str, dest_path: str, sshpass: str):
+    """
+
+    Args:
+        src_path (str): The source path to sync
+        dest_path (str): The destination path to sync
+        sshpass (str): The password to ssh
+    """
+    super().__init__()
+    self.src_path = src_path
+    self.dest_path = dest_path
+    self.sshpass = sshpass
+
+
+
+ +
+ +
+ + +

+ on_any_event(event) + +

+ + +
+ +

Sync the files to disk when they are created, modified, moved or deleted +Args: + event:

+

Returns:

+ +
+ Source code in Agent/utils/storage/local_sync_handler.py +
def on_any_event(self, event):
+    """
+    Sync the files to disk when they are created, modified, moved or deleted
+    Args:
+        event:
+
+    Returns:
+
+    """
+    if event.is_directory:
+        return None
+    else:
+        if self.sshpass:
+            subprocess.call(
+                [
+                    "sshpass",
+                    "-p",
+                    self.sshpass,
+                    "rsync",
+                    "-avz",
+                    "--delete",
+                    self.src_path,
+                    self.dest_path,
+                ]
+            )
+        else:
+            # we can set up the authentication first, then we can use the rsync command
+            subprocess.call(
+                ["rsync", "-avz", "--delete", self.src_path, self.dest_path]
+            )
+
+
+
+ +
+ + + +
+ +
+ +
+ + + + +
+ +
+ +
+ + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/Sources/Agent/utils/storage/s3_sync_handler/index.html b/Sources/Agent/utils/storage/s3_sync_handler/index.html new file mode 100644 index 00000000..c3788b87 --- /dev/null +++ b/Sources/Agent/utils/storage/s3_sync_handler/index.html @@ -0,0 +1,4759 @@ + + + + + + + + + + + + + + + + + + + + + + + S3SyncHandler - OpenOmni Framework by AI4WA + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

S3SyncHandler

+ +
+ + + + +
+ + + +
+ + + + + + + + +
+ + + +

+ S3SyncHandler + + +

+ + +
+

+ Bases: FileSystemEventHandler

+ + +

Sync the files to s3 when they are created, modified, moved or deleted

+ +
+ Source code in Agent/utils/storage/s3_sync_handler.py +
class S3SyncHandler(FileSystemEventHandler):
+    """
+    Sync the files to s3 when they are created, modified, moved or deleted
+    """
+
+    def __init__(self, s3_client):
+        super().__init__()
+        self.s3_client = s3_client
+
+    def on_any_event(self, event):
+        if event.is_directory:
+            return None
+
+        elif event.event_type in ("created", "modified", "moved", "deleted"):
+            # print(f"Event type: {event.event_type} - Path: {event.src_path}")
+            # only process .mp4, .wav and .mp3 files
+            if event.src_path.split("/")[-1].split(".")[-1] not in [
+                "mp4",
+                "wav",
+                "mp3",
+            ]:
+                return None
+            try:
+                self.s3_client.upload_file(
+                    event.src_path,
+                    S3_BUCKET,
+                    f"Responder/{event.src_path.split(DATA_DIR.as_posix())[1].strip('/')}",
+                )
+                logger.info(f"Uploaded file to s3: {event.src_path}")
+                # logger.info(f"Listener/{event.src_path.split(DATA_DIR.as_posix())[1].strip('/')}")
+            except Exception as e:
+                logger.error(f"Error uploading file to s3: {e}")
+
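For reference, a hedged one-off upload using the same boto3 call and the same "Responder/&lt;path relative to DATA_DIR&gt;" key layout this handler produces; the bucket name and file path below are placeholders, with the real bucket coming from the project's S3_BUCKET constant.

import boto3

s3 = boto3.client("s3")
s3.upload_file(
    "/data/tts/example.mp3",        # placeholder local path under DATA_DIR
    "my-openomni-bucket",           # placeholder for S3_BUCKET
    "Responder/tts/example.mp3",    # object key relative to DATA_DIR
)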
+
+ + + +
+ + + + + + + + + + + +
+ +
+ +
+ + + + +
+ +
+ +
+ + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/Sources/Agent/utils/time_logger/index.html b/Sources/Agent/utils/time_logger/index.html new file mode 100644 index 00000000..c73b89d0 --- /dev/null +++ b/Sources/Agent/utils/time_logger/index.html @@ -0,0 +1,4955 @@ + + + + + + + + + + + + + + + + + + + + + + + + + TimeLogger - OpenOmni Framework by AI4WA + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

TimeLogger

+ +
+ + + + +
+ + + +
+ + + + + + + +
+ + + +

+ logger = get_logger(__name__) + + + module-attribute + + +

+ + +
+ +

For the latency

+

If it is model, the name will start with model_xx, and it is a duration +If it is transfer time, the name will start with transfer_xx, and it is a duration +If it is just to log the timestamp, the name will start with ts_xx, and it is a timestamp

+
+ +
+ + +
+ + + +

+ TimeLogger + + +

+ + +
+ + +
+ Source code in Agent/utils/time_logger.py +
class TimeLogger:
+
+    @staticmethod
+    def log_task(task: Task, name: str):
+        """
+        Log the time taken to execute a block of code
+        Args:
+            task (Task): The task to store the time
+            name (str): The name of the block
+
+        Returns:
+
+        """
+        # check whether the task has the latency profile
+
+        TimeLogger.log(task.result_json.latency_profile, name)
+
+    @staticmethod
+    def log(profile: dict, name: str):
+        """
+        Log the time taken to execute a block of code
+        Args:
+            profile (dict): The profile to store the time
+            name (str): The name of the block
+
+        Returns:
+
+        """
+        logger.info(profile)
+        logger.info(name)
+        profile[f"ts_{name}"] = datetime.now()
+
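A minimal usage sketch: log stamps a named timestamp into a latency-profile dict, prefixing the key with ts_ as described in the module docstring. The import path is assumed to be utils.time_logger within the Agent package.

from utils.time_logger import TimeLogger  # assumed import path

latency_profile = {}
TimeLogger.log(latency_profile, "transfer_audio_received")
# latency_profile now contains {"ts_transfer_audio_received": <datetime at call time>}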
+
+ + + +
+ + + + + + + + + +
+ + +

+ log(profile, name) + + + staticmethod + + +

+ + +
+ +

Log the time taken to execute a block of code +Args: + profile (dict): The profile to store the time + name (str): The name of the block

+

Returns:

+ +
+ Source code in Agent/utils/time_logger.py +
@staticmethod
+def log(profile: dict, name: str):
+    """
+    Log the time taken to execute a block of code
+    Args:
+        profile (dict): The profile to store the time
+        name (str): The name of the block
+
+    Returns:
+
+    """
+    logger.info(profile)
+    logger.info(name)
+    profile[f"ts_{name}"] = datetime.now()
+
+
+
+ +
+ +
+ + +

+ log_task(task, name) + + + staticmethod + + +

+ + +
+ +

Log the time taken to execute a block of code +Args: + task (Task): The task to store the time + name (str): The name of the block

+

Returns:

+ +
+ Source code in Agent/utils/time_logger.py +
@staticmethod
+def log_task(task: Task, name: str):
+    """
+    Log the time taken to execute a block of code
+    Args:
+        task (Task): The task to store the time
+        name (str): The name of the block
+
+    Returns:
+
+    """
+    # check whether the task has the latency profile
+
+    TimeLogger.log(task.result_json.latency_profile, name)
+
+
+
+ +
+ + + +
+ +
+ +
+ + + + +
+ +
+ +
+ + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/Sources/Agent/utils/time_tracker/index.html b/Sources/Agent/utils/time_tracker/index.html new file mode 100644 index 00000000..7121af6e --- /dev/null +++ b/Sources/Agent/utils/time_tracker/index.html @@ -0,0 +1,4713 @@ + + + + + + + + + + + + + + + + + + + + + + + + + TimeTracker - OpenOmni Framework by AI4WA + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

TimeTracker

+ +
+ + + + +
+ + + +
+ + + + + + + + + +
+ + +

+ time_tracker(label, profile, track_type=TrackType.MODEL.value) + +

+ + +
+ +

Track the time taken to execute a block of code +Args: + label (str): The name of the block + profile (dict): The profile to store the time + track_type (str): The type of tracking

+ +
+ Source code in Agent/utils/time_tracker.py +
@contextmanager
+def time_tracker(
+    label: str, profile: dict, track_type: TrackType = TrackType.MODEL.value
+):
+    """
+    Track the time taken to execute a block of code
+    Args:
+        label (str): The name of the block
+        profile (dict): The profile to store the time
+        track_type (str): The type of tracking
+    """
+    # It will be either model or transfer
+    start_time = time.time()
+    yield
+    end_time = time.time()
+    elapsed_time = end_time - start_time
+    profile[f"{track_type}_{label}"] = elapsed_time
+    logger.info(f"{label} took {elapsed_time} seconds")
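A usage sketch of the context manager: the elapsed seconds land in the profile under "&lt;track_type&gt;_&lt;label&gt;", so with the default track type the key starts with model_ per the naming convention above. The import path is assumed to be utils.time_tracker.

import time

from utils.time_tracker import time_tracker  # assumed import path

latency_profile = {}
with time_tracker("speech2text", latency_profile):  # default track_type -> "model_speech2text"
    time.sleep(0.1)                                  # stand-in for real model work
print(latency_profile)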
+
+
+
+ +
+ + + +
+ +
+ +
+ + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/Sources/Agent/utils/timer/index.html b/Sources/Agent/utils/timer/index.html new file mode 100644 index 00000000..8769971b --- /dev/null +++ b/Sources/Agent/utils/timer/index.html @@ -0,0 +1,4984 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Timer - OpenOmni Framework by AI4WA + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Timer

+ +
+ + + + +
+ + + +
+ + + + + + + + +
+ + + +

+ timer + + +

+ + +
+ + +

utility context manager used to log the time taken by a part of a program

+ +
+ Source code in Agent/utils/timer.py +
class timer:
+    """
+    utility context manager used to log the time taken by a part of a program
+    """
+
+    def __init__(self, logger: Logger, message: str):
+        """
+        init the timer
+
+        Parameters
+        ----------
+        logger: Logger
+            logger to write the logs
+        message: str
+            message to log, like start xxx
+        """
+        self.message = message
+        self.logger = logger
+        self.start = 0
+        self.duration = 0
+        self.sub_timers = []
+
+    def __enter__(self):
+        """
+        context enter to start write this
+        """
+        self.start = time.time()
+        self.logger.info("Starting %s" % self.message)
+        return self
+
+    def __exit__(self, context, value, traceback):
+        """
+        context exit will write this
+        """
+        self.duration = time.time() - self.start
+        self.logger.info(f"Finished {self.message}, that took {self.duration:.3f}")
+
+
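A usage sketch of the timer context manager defined above, assuming the module is importable as utils.timer:

import logging

from utils.timer import timer  # assumed import path

logging.basicConfig(level=logging.INFO)
log = logging.getLogger("agent")

with timer(log, "loading the model") as t:
    pass  # stand-in for the timed work
print(f"took {t.duration:.3f} seconds")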
+ + + +
+ + + + + + + + + +
+ + +

+ __enter__() + +

+ + +
+ +

context enter to start write this

+ +
+ Source code in Agent/utils/timer.py +
def __enter__(self):
+    """
+    context enter to start write this
+    """
+    self.start = time.time()
+    self.logger.info("Starting %s" % self.message)
+    return self
+
+
+
+ +
+ +
+ + +

+ __exit__(context, value, traceback) + +

+ + +
+ +

context exit will write this

+ +
+ Source code in Agent/utils/timer.py +
def __exit__(self, context, value, traceback):
+    """
+    context exit will write this
+    """
+    self.duration = time.time() - self.start
+    self.logger.info(f"Finished {self.message}, that took {self.duration:.3f}")
+
+
+
+ +
+ +
+ + +

+ __init__(logger, message) + +

+ + +
+ +

init the timer

+
Parameters
+

logger: Logger + logger to write the logs +message: str + message to log, like start xxx

+ +
+ Source code in Agent/utils/timer.py +
def __init__(self, logger: Logger, message: str):
+    """
+    init the timer
+
+    Parameters
+    ----------
+    logger: Logger
+        logger to write the logs
+    message: str
+        message to log, like start xxx
+    """
+    self.message = message
+    self.logger = logger
+    self.start = 0
+    self.duration = 0
+    self.sub_timers = []
+
+
+
+ +
+ + + +
+ +
+ +
+ + + + +
+ +
+ +
+ + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/Sources/Client/Listener/api/index.html b/Sources/Client/Listener/api/index.html new file mode 100644 index 00000000..671d009a --- /dev/null +++ b/Sources/Client/Listener/api/index.html @@ -0,0 +1,6006 @@ + + + + + + + + + + + + + + + + + + + + + + + + + API - OpenOmni Framework by AI4WA + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

API

+ +
+ + + + +
+ + + +
+ + + + + + + + +
+ + + +

+ API + + +

+ + +
+ + +

This is used to communicate with the API.

+
    +
• Register the device
• Post audio to the API
• Post video to the API
• [Optional] Queue speech to text
+ +
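A hedged usage sketch of this Listener-side client; the domain, token and home_id are placeholders that must match a deployed API instance, and the import path is assumed to be the api module inside Client/Listener.

from datetime import datetime
from uuid import uuid4

from api import API  # assumed import path inside Client/Listener

api = API(domain="http://localhost:8000", token="<api-token>", home_id=1)
api.register_device(device_name="kitchen-listener", device_type="audio", description="demo device")

audio_uid = str(uuid4())
api.post_audio(
    uid=audio_uid,
    sequence_index=0,
    audio_file="audio/demo/segment_0.wav",  # placeholder path, synced separately to the API storage
    start_time=datetime.now(),
    end_time=datetime.now(),
)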
+ Source code in Client/Listener/api.py +
class API:
+    """
+    This is used to communicate with the API.
+
+    - Register the device
+    - Post audio to the API
+    - Post video to the API
+    - [Optional] Queue speech to text
+    """
+
+    def __init__(
+        self,
+        domain: str = API_DOMAIN,
+        token: str = "",
+        home_id: Optional[int] = None,
+        track_cluster: Optional[str] = None,
+    ):
+        """
+        The API class for the responder
+
+        It will require the token and the endpoint to communicate with the API.
+
+        If you deploy the API to a cloud server, do not forget to change the domain to the server's domain.
+
+        Args:
+            domain (str): The domain of the API.
+            token (str): The token for the API.
+            home_id (int): The home ID.
+            track_cluster (str): The track cluster.
+
+        """
+        self.domain = domain
+        self.token = token
+        self.mac_address = get_mac_address()
+        self.home_id = home_id
+        self.track_cluster = track_cluster
+
+    def set_track_id(self):
+        if self.track_cluster is None:
+            return None
+        uid = str(uuid4())
+        uid = uid.replace("-", "")
+        track_id = f"T-{self.track_cluster}-{uid}"
+        logger.info(track_id)
+        return track_id
+
+    def register_device(
+        self,
+        device_name: Optional[str] = None,
+        device_type: Optional[str] = None,
+        description: Optional[str] = None,
+    ):
+        """
+        Register the device to the API.
+        Args:
+            device_name (Optional[str]): The device name, you can name it if you want to distinguish it better later
+            device_type (Optional[str]): The device type, this can be used to distinguish the device type
+            description (Optional[str]): The description of the device
+
+        Returns:
+
+        """
+        url = f"{self.domain}/hardware/register/"
+
+        r = requests.post(
+            url,
+            data={
+                "home": self.home_id,
+                "mac_address": self.mac_address,
+                "device_name": device_name,
+                "device_type": device_type,
+                "description": description,
+            },
+            headers={"Authorization": f"Token {self.token}"},
+            timeout=30,
+        )
+        logger.info(url)
+
+        logger.info(f"POST {url} {r.status_code}")
+
+    def post_audio(
+        self,
+        uid: str,
+        sequence_index: int,
+        audio_file: str,
+        start_time: datetime,
+        end_time: datetime,
+        track_id: str = None,
+    ):
+        """
+        Post metadata of the audio to the API.
+        Args:
+            uid (str): uuid of the audio
+            sequence_index (int): The sequence index of the audio in this loop, together with uuid,
+                                  it can be used to identify the audio
+            audio_file (str): Path to the audio file, which will be synced to the API disk storage by a separate sync process
+            start_time (datetime): The start time of the audio
+            end_time (datetime): The end time of the audio
+            track_id (str): The track id of the task
+
+        Returns:
+
+        """
+        url = f"{self.domain}/hardware/audio/"
+        r = requests.post(
+            url,
+            data={
+                "home": self.home_id,
+                "uid": uid,
+                "sequence_index": sequence_index,
+                "audio_file": audio_file,
+                "start_time": start_time,
+                "end_time": end_time,
+                "hardware_device_mac_address": self.mac_address,
+                "track_id": track_id,
+            },
+            headers={"Authorization": f"Token {self.token}"},
+            timeout=30,
+        )
+        logger.info(f"POST {url} {r.status_code}")
+        if r.status_code != 201:
+            return None
+        return r.json()
+
+    def post_video(
+        self, uid: str, video_file: str, start_time: datetime, end_time: datetime
+    ):
+        """
+        Post metadata of the video to the API.
+        Args:
+            uid (str): uuid of this video section
+            video_file (str): Path to the video file, which will be synced to the API disk storage by a separate
+                              sync process; the file name also encodes the start/end time
+            start_time (datetime): The start time of the video
+            end_time (datetime): The end time of the video
+        Returns:
+
+        """
+        url = f"{self.domain}/hardware/video/"
+        data = {
+            "home": self.home_id,
+            "uid": uid,
+            "hardware_device_mac_address": self.mac_address,
+            "video_file": video_file,
+            "start_time": start_time.isoformat(),
+            "end_time": end_time.isoformat(),
+        }
+        logger.info(data)
+        r = requests.post(
+            url, data=data, headers={"Authorization": f"Token {self.token}"}, timeout=30
+        )
+        logger.info(f"POST {url} {r.status_code}")
+        if r.status_code != 200:
+            return None
+        return r.json()
+
+    def queue_speech_to_text(
+        self, uid: str, audio_index: str, start_time: datetime, end_time: datetime
+    ) -> str:
+        """
+        Optional, used to queue the speech to text task
+        Args:
+            uid (str): uuid of the audio
+            audio_index (str): The audio index, which can be used to identify the audio
+            start_time (datetime): The start time of the audio
+            end_time (datetime): The end time of the audio
+
+        Returns:
+            (str): The track id of the task
+
+        """
+        track_id = self.set_track_id()
+        url = f"{self.domain}/queue_task/ai_task/"
+        data = {
+            "name": "speech_to_text",
+            "task_name": "speech2text",
+            "parameters": json.dumps(
+                {
+                    "uid": uid,
+                    "home_id": self.home_id,
+                    "audio_index": audio_index,
+                    "start_time": start_time.isoformat(),
+                    "end_time": end_time.isoformat(),
+                    "hardware_device_mac_address": self.mac_address,
+                }
+            ),
+            "track_id": track_id,
+        }
+        r = requests.post(
+            url, data=data, headers={"Authorization": f"Token {self.token}"}, timeout=30
+        )
+        logger.info(f"POST {url} {r.status_code}")
+        if r.status_code != 200:
+            logger.info(data)
+            return None
+        logger.info(r.json())
+        return track_id
+
+    def get_storage_solution(self):
+        """
+        Get the storage solution from the API
+        Returns:
+
+        """
+        url = f"{self.domain}/hardware/storage_solution/"
+        r = requests.get(
+            url, headers={"Authorization": f"Token {self.token}"}, timeout=30
+        )
+        logger.info(f"GET {url} {r.status_code}")
+        if r.status_code != 200:
+            return None
+        data = r.json()
+        logger.info(data)
+        return data.get("storage_solution", "local")
+
+    def upload_file(
+        self,
+        source_file: str,
+        dest_path: str,
+    ):
+        """
+        Upload the file to the API
+        """
+        url = f"{self.domain}/hardware/upload_file/"
+        files = {"file": open(source_file, "rb")}
+        data = {
+            "dest_path": dest_path,
+        }
+        r = requests.post(
+            url,
+            files=files,
+            data=data,
+            headers={"Authorization": f"Token {self.token}"},
+            timeout=30,
+        )
+        logger.info(f"POST {url} {r.status_code}")
+        if r.status_code != 200:
+            return None
+        return True
+
+
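To make the flow above concrete, here is a minimal usage sketch of this client. It assumes the API runs at the default localhost domain; the token, home id, uid, and file name below are placeholders, and the import path assumes Client/Listener/api.py is importable as `api`.

    from datetime import datetime, timedelta

    from api import API  # assumed import path for Client/Listener/api.py

    # Placeholder credentials: replace with your own deployment's token and home id.
    api = API(domain="http://localhost:8000", token="<your-api-token>", home_id=1, track_cluster="demo")

    # Register this device once, then post the metadata of a recorded clip.
    api.register_device(device_name="kitchen-pi", device_type="audio", description="demo listener")

    start = datetime.now()
    end = start + timedelta(seconds=5)

    # Queue speech-to-text first so the returned track_id can be attached to the audio record.
    track_id = api.queue_speech_to_text(uid="demo-uid", audio_index="0", start_time=start, end_time=end)
    api.post_audio(
        uid="demo-uid",
        sequence_index=0,
        audio_file="0-20240805150130.wav",  # file name only; the file itself is synced separately
        start_time=start,
        end_time=end,
        track_id=track_id,
    )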
__init__(domain=API_DOMAIN, token='', home_id=None, track_cluster=None)

The API class for the responder

It will require the token and the endpoint to communicate with the API.

If you deploy the API to a cloud server, do not forget to change the domain to the server's domain.

Parameters:

    Name           Type  Description             Default
    domain         str   The domain of the API.  API_DOMAIN
    token          str   The token for the API.  ''
    home_id        int   The home ID.            None
    track_cluster  str   The track cluster.      None

Source code in Client/Listener/api.py, lines 24-49 (see the full class listing above).

get_storage_solution()

Get the storage solution from the API.

Returns: the configured storage solution name, or "local" if none is set; None if the request fails.

Source code in Client/Listener/api.py, lines 212-227 (see the full class listing above).

post_audio(uid, sequence_index, audio_file, start_time, end_time, track_id=None)

Post metadata of the audio to the API.

Args:
    uid (str): uuid of the audio
    sequence_index (int): The sequence index of the audio in this loop; together with uuid, it can be used to identify the audio
    audio_file (str): Path to the audio file, which will be synced to the API disk storage by a separate sync process
    start_time (datetime): The start time of the audio
    end_time (datetime): The end time of the audio
    track_id (str): The track id of the task

Returns: the JSON response on success (HTTP 201), otherwise None.

Source code in Client/Listener/api.py, lines 94-136 (see the full class listing above).

post_video(uid, video_file, start_time, end_time)

Post metadata of the video to the API.

Args:
    uid (str): uuid of this video section
    video_file (str): Path to the video file, which will be synced to the API disk storage by a separate sync process; the file name also encodes the start/end time
    start_time (datetime): The start time of the video
    end_time (datetime): The end time of the video

Returns: the JSON response on success (HTTP 200), otherwise None.

Source code in Client/Listener/api.py, lines 138-168 (see the full class listing above).

queue_speech_to_text(uid, audio_index, start_time, end_time)

Optional, used to queue the speech to text task.

Args:
    uid (str): uuid of the audio
    audio_index (str): The audio index, which can be used to identify the audio
    start_time (datetime): The start time of the audio
    end_time (datetime): The end time of the audio

Returns:
    (str): The track id of the task, or None if the request fails.

Source code in Client/Listener/api.py, lines 170-210 (see the full class listing above).

register_device(device_name=None, device_type=None, description=None)

Register the device to the API.

Args:
    device_name (Optional[str]): The device name; set it if you want to distinguish this device more easily later
    device_type (Optional[str]): The device type, which can be used to distinguish between kinds of devices
    description (Optional[str]): The description of the device

Source code in Client/Listener/api.py, lines 60-92 (see the full class listing above).

upload_file(source_file, dest_path)

Upload the file to the API.

Source code in Client/Listener/api.py, lines 229-252 (see the full class listing above).
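One small caveat in the listing above: upload_file opens the source file without closing it. If you wrap the call yourself, a context manager keeps the handle from leaking. This is only a sketch, not part of the library; it reuses the same endpoint and headers shown above.

    import requests

    def upload_file_safely(api, source_file: str, dest_path: str):
        # Same request as API.upload_file, but the file handle is closed deterministically.
        url = f"{api.domain}/hardware/upload_file/"
        with open(source_file, "rb") as fh:
            r = requests.post(
                url,
                files={"file": fh},
                data={"dest_path": dest_path},
                headers={"Authorization": f"Token {api.token}"},
                timeout=30,
            )
        return True if r.status_code == 200 else None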
\ No newline at end of file
diff --git a/Sources/Client/Listener/audios_acquire/index.html b/Sources/Client/Listener/audios_acquire/index.html
new file mode 100644
index 00000000..4beab9bc
--- /dev/null
+++ b/Sources/Client/Listener/audios_acquire/index.html
@@ -0,0 +1,5531 @@
+ Audio - OpenOmni Framework by AI4WA

Audio

AudioAcquire

Source code in Client/Listener/audios_acquire.py (lines 18-180):
class AudioAcquire:
+    def __init__(
+        self,
+        api_domain: str = "",
+        token: str = "",
+        home_id: Optional[str] = "",
+        energy_threshold: int = 5000,
+        default_microphone: str = "pulse",
+        record_timeout: int = 30000,
+        sampling_time: float = 0.25,
+        track_cluster: Optional[str] = None,
+    ):
+        """
+        The audio acquire class
+
+        Args:
+            api_domain (str): the api domain
+            token (str): the api token
+            home_id (str): the home id
+            energy_threshold (int): the energy threshold for the audio
+            default_microphone (str): the default microphone
+            record_timeout (int): the record timeout
+            sampling_time (float): the sampling time in seconds, default is 0.25
+            track_cluster (str): the track cluster
+        """
+        self.uid = str(uuid.uuid4())
+        self.data_dir = DATA_DIR / "audio" / self.uid  # the data dir
+        self.data_dir.mkdir(parents=True, exist_ok=True)
+
+        # api setup
+        self.api = API(
+            domain=api_domain, token=token, home_id=home_id, track_cluster=track_cluster
+        )
+        # register the device
+        self.api.register_device()
+
+        # the energy threshold for the microphone
+        self.energy_threshold = energy_threshold
+        # the default microphone
+        self.default_microphone = default_microphone
+        # the record timeout
+        self.record_timeout = record_timeout
+        # sampling time
+        self.sampling_time = sampling_time
+
+        # the audio index when record starts
+        self.audio_index = 0
+        logger.info(f"session uid: {self.uid}")
+        logger.info(f"starting timestamp {datetime.now()}")
+        self.source = self.get_source()
+
+    def get_source(self):
+        """
+        Get the source of the audio
+        Returns:
+
+        """
+
+        source = None
+
+        if "linux" in platform:
+            mic_name = self.default_microphone
+            # to do the debug
+            for index, name in enumerate(sr.Microphone.list_microphone_names()):
+                logger.debug(index)
+                logger.debug(name)
+            if not mic_name or mic_name == "list":
+                logger.info("Available microphone devices are: ")
+                for index, name in enumerate(sr.Microphone.list_microphone_names()):
+                    logger.critical(f'Microphone with name "{name}" found')
+                return
+            else:
+                for index, name in enumerate(sr.Microphone.list_microphone_names()):
+                    if mic_name in name:
+                        logger.debug(index)
+                        logger.debug(name)
+                        source = sr.Microphone(sample_rate=16000, device_index=index)
+                        break
+        else:
+            source = sr.Microphone(sample_rate=16000)
+        return source
+
+    def run(self):
+        data_queue = Queue()
+        sample_time_queue = Queue()
+        audio_index_queue = Queue()
+        recorder = sr.Recognizer()
+        recorder.energy_threshold = self.energy_threshold
+        recorder.dynamic_energy_threshold = False
+
+        logger.critical(f"Using microphone {self.source}")
+
+        with self.source:
+            recorder.adjust_for_ambient_noise(self.source)
+
+        def record_callback(_, audio: sr.AudioData) -> None:
+            """
+            Threaded callback function to receive audio data when recordings finish.
+            Args:
+                _:
+                audio (sr.AudioData): An AudioData containing the recorded bytes.
+
+            Returns:
+
+            """
+            with timer(logger, f"Recording {self.audio_index}"):
+                data = audio.get_raw_data()
+                wav_data = audio.get_wav_data()
+                sample_time = datetime.now()
+                data_queue.put(data)
+                sample_time_queue.put(sample_time)
+                audio_index_queue.put(self.audio_index)
+                curr_audio_dir = DATA_DIR / "audio" / self.uid
+                curr_audio_dir.mkdir(parents=True, exist_ok=True)
+
+                with open(
+                    curr_audio_dir
+                    / f"{self.audio_index}-{sample_time.strftime('%Y%m%d%H%M%S')}.wav",
+                    "wb",
+                ) as file:
+                    file.write(wav_data)
+                self.audio_index += 1
+
+        # Create a background thread that will pass us raw audio bytes.
+        # We could do this manually, but SpeechRecognizer provides a nice helper.
+        recorder.listen_in_background(
+            self.source, record_callback, phrase_time_limit=self.record_timeout
+        )  # phrase_time_limit continues to monitor the time
+
+        last_sample_start_time = datetime.now()
+        logger.info("Model loaded.")
+        logger.info("Listening for audio...")
+
+        while True:
+            try:
+                if not data_queue.empty():
+                    logger.info("no more sound, start transform...")
+
+                    data_queue.queue.clear()
+                    last_sample_time = sample_time_queue.queue[-1]
+                    sample_time_queue.queue.clear()
+                    audio_index = audio_index_queue.queue[-1]
+                    audio_index_queue.queue.clear()
+
+                    track_id = self.api.queue_speech_to_text(
+                        self.uid,
+                        audio_index=str(audio_index),
+                        start_time=last_sample_start_time,
+                        end_time=last_sample_time,
+                    )
+                    self.api.post_audio(
+                        self.uid,
+                        audio_index,
+                        f"{audio_index}-{last_sample_time.strftime('%Y%m%d%H%M%S')}.wav",
+                        last_sample_start_time,
+                        last_sample_time,
+                        track_id=track_id,
+                    )
+                    last_sample_start_time = last_sample_time
+
+                    sleep(self.sampling_time)
+            except KeyboardInterrupt:
+                break
+
+
__init__(api_domain='', token='', home_id='', energy_threshold=5000, default_microphone='pulse', record_timeout=30000, sampling_time=0.25, track_cluster=None)

The audio acquire class

Parameters:

    Name                Type   Description                                    Default
    api_domain          str    the api domain                                 ''
    token               str    the api token                                  ''
    home_id             str    the home id                                    ''
    energy_threshold    int    the energy threshold for the audio             5000
    default_microphone  str    the default microphone                         'pulse'
    record_timeout      int    the record timeout                             30000
    sampling_time       float  the sampling time in seconds, default is 0.25  0.25
    track_cluster       str    the track cluster                              None

Source code in Client/Listener/audios_acquire.py, lines 19-67 (see the full class listing above).

get_source()

Get the source of the audio.

Returns: a speech_recognition Microphone instance, or None if no matching microphone is found.

Source code in Client/Listener/audios_acquire.py, lines 69-98 (see the full class listing above).


main()

The main function.

Source code in Client/Listener/audios_acquire.py (lines 183-233):
def main():
+    """
+    The main function
+    """
+    parser = argparse.ArgumentParser()
+
+    parser.add_argument(
+        "--api_domain", default="http://localhost:8000", help="API domain", type=str
+    )
+    parser.add_argument("--token", default="", help="API token", type=str)
+    parser.add_argument("--home_id", default=None, help="which home it is", type=str)
+
+    parser.add_argument(
+        "--energy_threshold",
+        default=5000,
+        help="Energy level for mic to detect.",
+        type=int,
+    )
+    parser.add_argument(
+        "--record_timeout",
+        default=30000,
+        help="How real time the recording is in seconds.",
+        type=float,
+    )
+
+    parser.add_argument(
+        "--default_microphone",
+        default="pulse",
+        help="Default microphone name for SpeechRecognition. "
+        "Run this with 'list' to view available Microphones.",
+        type=str,
+    )
+    parser.add_argument(
+        "--track_cluster",
+        default=None,
+        help="The track cluster to be used",
+        type=str,
+    )
+
+    args = parser.parse_args()
+
+    audio_acquire = AudioAcquire(
+        api_domain=args.api_domain,
+        token=args.token,
+        home_id=args.home_id,
+        energy_threshold=args.energy_threshold,
+        default_microphone=args.default_microphone,
+        record_timeout=args.record_timeout,
+        track_cluster=args.track_cluster,
+    )
+    audio_acquire.run()
+
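For reference, a minimal way to drive the same entry point from Python, mirroring the argparse defaults above. The token and home id are placeholders, the import path is assumed, and a working microphone plus a reachable API are required because __init__ registers the device immediately.

    from audios_acquire import AudioAcquire  # assumed import path for Client/Listener/audios_acquire.py

    acquirer = AudioAcquire(
        api_domain="http://localhost:8000",
        token="<your-api-token>",          # placeholder
        home_id="1",                       # placeholder
        default_microphone="pulse",        # on Linux, "list" only logs the available microphone names
    )
    acquirer.run()  # blocks in the capture loop; Ctrl+C stops it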
\ No newline at end of file
diff --git a/Sources/Client/Listener/constants/index.html b/Sources/Client/Listener/constants/index.html
new file mode 100644
index 00000000..c42b7195
--- /dev/null
+++ b/Sources/Client/Listener/constants/index.html
@@ -0,0 +1,4709 @@
+ Constants - OpenOmni Framework by AI4WA

Constants

S3_BUCKET = 'openomni'   module-attribute

get parent of current folder as root

ROOT_PATH = os.path.dirname(os.path.abspath(__file__))
DATA_DIR = Path(ROOT_PATH) / "data"

DATA_DIR.mkdir(parents=True, exist_ok=True)

API_DOMAIN = "http://localhost:8000"

S3_BUCKET = "openomni"  => change this to your bucket name
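Putting those attributes together, constants.py presumably looks roughly like the sketch below; the imports are assumptions, and only the values shown above come from the module itself.

    import os
    from pathlib import Path

    # get parent of current folder as root
    ROOT_PATH = os.path.dirname(os.path.abspath(__file__))
    DATA_DIR = Path(ROOT_PATH) / "data"
    DATA_DIR.mkdir(parents=True, exist_ok=True)

    API_DOMAIN = "http://localhost:8000"
    S3_BUCKET = "openomni"  # change this to your own bucket name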

\ No newline at end of file
diff --git a/Sources/Client/Listener/mock/data_extraction/index.html b/Sources/Client/Listener/mock/data_extraction/index.html
new file mode 100644
index 00000000..6370b956
--- /dev/null
+++ b/Sources/Client/Listener/mock/data_extraction/index.html
@@ -0,0 +1,5659 @@
+ Mock - OpenOmni Framework by AI4WA

Mock

DataMock

We will first extract the audio and video from the video file, and then treat it as current time + any time in the future.

Then save them into the data folder as the other clients do:

- audio
    - /audio/uuid/0-datetime.wav
- video
    - /videos/uuid/datetime.mp4
    - /video/uuid/frames/date-time/xx.jpg

For the mock US-Election debate, the timestamps are:
02:53, 3:20, 20:20, 20:39, 33:38, 34:18, 55:15, 55:40, 80:05, 80:18

Source code in Client/Listener/mock/data_extraction.py (lines 29-222):
class DataMock:
+    """
+    We will first extract the audio and video from the video file.
+    And then treat it as current time + any time in the future.
+
+    Then save them into the data folder as other did
+
+    - audio
+        - /audio/uuid/0-datetime.wav
+    - video
+        - /videos/uuid/datetime.mp4
+        - /video/uuid/frames/date-time/xx.jpg
+
+
+    For the mock US-Election debate
+    It is:
+    - 02:53,3:20,20:20,20:39,33:38,34:18,55:15,55:40,80:05,80:18
+
+    """
+
+    def __init__(
+        self,
+        api_domain: str,
+        token: str,
+        home_id: Optional[int],
+        track_cluster: str = None,
+    ):
+        self.api = API(
+            domain=api_domain, token=token, home_id=home_id, track_cluster=track_cluster
+        )
+        self.uid = str(uuid4())
+
+        self.audio_dir = DATA_DIR / "audio" / self.uid
+        self.audio_dir.mkdir(parents=True, exist_ok=True)
+        self.video_dir = DATA_DIR / "videos" / self.uid
+        self.frames_dir = self.video_dir / "frames"
+        self.frames_dir.mkdir(parents=True, exist_ok=True)
+        self.mock_dir = DATA_DIR / "mock" / "output"
+        self.mock_dir.mkdir(parents=True, exist_ok=True)
+
+        self.current_time = datetime.now()
+
+    def replay(self, time_ranges: List[Tuple[int, int]], input_video_path: str):
+        """
+        Replays the audio and video from the specified time
+        Args:
+            time_ranges (List[int, int]): List of time ranges in seconds.
+            input_video_path (str): Path to the input video file.
+
+        Returns:
+
+        """
+        for index, time_range in enumerate(time_ranges):
+            start_second, end_second = time_range
+            start_time = self.current_time + timedelta(seconds=start_second)
+            end_time = self.current_time + timedelta(seconds=end_second)
+
+            self.extract_audio_and_video(
+                input_video_path=input_video_path,
+                start_second=start_second,
+                end_second=end_second,
+                start_time=start_time,
+                end_time=end_time,
+                output_audio_path=self.audio_dir
+                / f"{index}-{end_time.strftime('%Y%m%d%H%M%S')}.wav",
+            )
+
+            track_id = self.api.queue_speech_to_text(
+                uid=self.uid,
+                audio_index=str(index),
+                start_time=start_time,
+                end_time=end_time,
+            )
+            self.api.post_audio(
+                uid=self.uid,
+                sequence_index=index,
+                audio_file=f"{index}-{end_time.strftime('%Y%m%d%H%M%S')}.wav",
+                start_time=start_time,
+                end_time=end_time,
+                track_id=track_id,
+            )
+
+    def extract_audio_and_video(
+        self,
+        input_video_path: str,
+        start_second: int,
+        end_second: int,
+        start_time: datetime,
+        end_time: datetime,
+        output_audio_path: str,
+    ):
+        """
+        Extracts the audio and video from a specified segment of a video file.
+
+        Args:
+            input_video_path (str): Path to the input video file.
+            start_second (int): Start time in seconds.
+            end_second (int): End time in seconds.
+            output_audio_path (str): Path to save the extracted audio file.
+        """
+        output_video_path = (
+            self.mock_dir
+            / f"{input_video_path.split('/')[-1]}-{start_second}-{end_second}.mp4"
+        ).as_posix()
+        # Load the video file
+        video_clip = VideoFileClip(input_video_path)
+
+        # Cut the video clip from start_time to end_time
+        sub_clip = video_clip.subclip(start_second, end_second)
+
+        # Write the video clip to the output path
+        sub_clip.write_videofile(output_video_path, codec="libx264", audio_codec="aac")
+
+        # Extract the audio from the sub clip
+        audio_clip = sub_clip.audio
+
+        # Write the audio clip to the output path
+        audio_clip.write_audiofile(output_audio_path)
+
+        # Close the clips
+        # video_clip.close()
+        sub_clip.close()
+        audio_clip.close()
+        video_clip.close()
+        # then we want to split the video by minutes, each minute will have 1 mp4 file
+        # and the frames
+        start_minute = start_time.replace(second=0, microsecond=0)
+        end_minute = end_time.replace(second=0, microsecond=0) + timedelta(minutes=1)
+
+        for i in range((end_minute - start_minute).seconds // 60):
+            logger.info(f"Processing minute {i}")
+            video_clip = VideoFileClip(input_video_path)
+            the_minute_start_time = start_minute + timedelta(minutes=i)
+            the_minute_end_time = start_minute + timedelta(minutes=i + 1)
+            the_minute_output_video_path = (
+                Path(self.video_dir)
+                / (the_minute_start_time.strftime("%Y-%m-%d_%H-%M-%S") + ".mp4")
+            ).as_posix()
+            # recover the seconds range for each minute
+            the_minute_start_second = (
+                the_minute_start_time - self.current_time
+            ).seconds
+            the_minute_end_second = (the_minute_end_time - self.current_time).seconds
+            logger.info(f"{the_minute_start_second}-{the_minute_end_second}")
+            minute_clip = video_clip.subclip(
+                the_minute_start_second, the_minute_end_second
+            )
+            minute_clip.write_videofile(
+                the_minute_output_video_path, codec="libx264", audio_codec="aac"
+            )
+            minute_clip.close()
+
+            # frames_folder
+            frames_folder = self.frames_dir / the_minute_start_time.strftime(
+                "%Y-%m-%d_%H-%M"
+            )
+            frames_folder.mkdir(parents=True, exist_ok=True)
+            self.split_video_in_minutes(
+                the_minute_output_video_path, frames_folder.as_posix()
+            )
+            self.api.post_video(
+                self.uid,
+                the_minute_output_video_path.split("/")[-1],
+                start_time=the_minute_start_time,
+                end_time=the_minute_end_time,
+            )
+
+            video_clip.close()
+
+    @staticmethod
+    def split_video_in_minutes(video_path, output_folder, fps=1):
+        """
+        Splits a video into images.
+
+        Args:
+            video_path (str): Path to the video file.
+            output_folder (str): Folder to save the extracted images.
+            fps (int): Frames per second to extract. Defaults to 1.
+        """
+        # Load the video file
+        the_video_clip = VideoFileClip(video_path)
+
+        # Ensure the output folder exists
+        if not os.path.exists(output_folder):
+            os.makedirs(output_folder)
+
+        # Extract frames
+        for i, frame in enumerate(the_video_clip.iter_frames(fps=fps)):
+            # Save each frame as an image
+            frame_path = os.path.join(output_folder, f"{i}.png")
+            imageio.imwrite(frame_path, frame)
+
+        # Close the video clip
+        the_video_clip.close()
+
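As a usage sketch, the US-Election debate timestamps in the class docstring pair up naturally into five (start, end) ranges; converted to seconds they could be replayed as below. The token, home id, import path, and video path are placeholders, and moviepy plus a running API are assumed.

    from mock.data_extraction import DataMock  # assumed import path for Client/Listener/mock/data_extraction.py

    # 02:53-3:20, 20:20-20:39, 33:38-34:18, 55:15-55:40, 80:05-80:18 converted to seconds
    DEBATE_RANGES = [(173, 200), (1220, 1239), (2018, 2058), (3315, 3340), (4805, 4818)]

    mock = DataMock(
        api_domain="http://localhost:8000",
        token="<your-api-token>",   # placeholder
        home_id=1,                  # placeholder
        track_cluster="debate-demo",
    )
    mock.replay(DEBATE_RANGES, input_video_path="data/mock/us_election_debate.mp4")  # hypothetical path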
extract_audio_and_video(input_video_path, start_second, end_second, start_time, end_time, output_audio_path)

Extracts the audio and video from a specified segment of a video file.

Parameters:

    Name               Type  Description                             Default
    input_video_path   str   Path to the input video file.           required
    start_second       int   Start time in seconds.                  required
    end_second         int   End time in seconds.                    required
    output_audio_path  str   Path to save the extracted audio file.  required

Source code in Client/Listener/mock/data_extraction.py, lines 111-196 (see the full class listing above).


replay(time_ranges, input_video_path)

Replays the audio and video from the specified time ranges.

Args:
    time_ranges (List[Tuple[int, int]]): List of (start, end) time ranges in seconds.
    input_video_path (str): Path to the input video file.

Source code in Client/Listener/mock/data_extraction.py, lines 71-109 (see the full class listing above).


split_video_in_minutes(video_path, output_folder, fps=1)   staticmethod

Splits a video into images.

Parameters:

    Name           Type  Description                            Default
    video_path     str   Path to the video file.                required
    output_folder  str   Folder to save the extracted images.   required
    fps            int   Frames per second to extract.          1

Source code in Client/Listener/mock/data_extraction.py, lines 198-222 (see the full class listing above).
\ No newline at end of file
diff --git a/Sources/Client/Listener/setup/index.html b/Sources/Client/Listener/setup/index.html
new file mode 100644
index 00000000..e21aef4b
--- /dev/null
+++ b/Sources/Client/Listener/setup/index.html
@@ -0,0 +1,4575 @@
+ Setup - OpenOmni Framework by AI4WA
+ + + + + + + +

Setup

\ No newline at end of file
diff --git a/Sources/Client/Listener/storage/index.html b/Sources/Client/Listener/storage/index.html
new file mode 100644
index 00000000..36fef5b6
--- /dev/null
+++ b/Sources/Client/Listener/storage/index.html
@@ -0,0 +1,5676 @@
+ Storage - OpenOmni Framework by AI4WA
+ + + + + + + +

Storage

APISyncHandler

Bases: FileSystemEventHandler

Sync the files to the API server when they are created, modified, moved or deleted.

Source code in Client/Listener/storage.py (lines 113-147):
class APISyncHandler(FileSystemEventHandler):
+    """
+    Sync the files to the API server when they are created, modified, moved or deleted
+    """
+
+    def __init__(self, home_id: int, api: API):
+        super().__init__()
+        self.home_id = home_id
+        self.api = api
+
+    def on_any_event(self, event):
+        if event.is_directory:
+            return None
+
+        elif event.event_type in ("created", "modified", "moved", "deleted"):
+            # print(f"Event type: {event.event_type} - Path: {event.src_path}")
+            # only process .avi and .wav files
+
+            if event.src_path.split("/")[-1].split(".")[-1] not in [
+                "mp4",
+                "wav",
+                "mp3",
+                "jpg",
+                "jpeg",
+                "png",
+            ]:
+                return None
+            try:
+                self.api.upload_file(
+                    event.src_path,
+                    f"Listener/{event.src_path.split(DATA_DIR.as_posix())[1].strip('/')}",
+                )
+                logger.info(f"Uploaded file to server: {event.src_path}")
+            except Exception as e:
+                logger.error(f"Error uploading file to s3: {e}")
+
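A minimal sketch of how this handler might be wired up with a watchdog observer, following the same pattern StorageHandler uses for the other backends. The token and home id are placeholders, and the import paths are assumptions based on the module layout shown in this documentation.

    import time

    from watchdog.observers import Observer

    from api import API                 # assumed: Client/Listener/api.py
    from constants import DATA_DIR      # assumed: Client/Listener/constants.py
    from storage import APISyncHandler  # assumed: Client/Listener/storage.py

    api = API(domain="http://localhost:8000", token="<your-api-token>", home_id=1)  # placeholders
    observer = Observer()
    observer.schedule(APISyncHandler(home_id=1, api=api), str(DATA_DIR), recursive=True)
    observer.start()
    try:
        while True:
            time.sleep(1)  # keep the watcher alive until Ctrl+C
    except KeyboardInterrupt:
        observer.stop()
    observer.join()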
+
LocalSyncHandler

Bases: FileSystemEventHandler

Sync the files to disk when they are created, modified, moved or deleted.

Source code in Client/Listener/storage.py (lines 24-71):
class LocalSyncHandler(FileSystemEventHandler):
+    """
+    Sync the files to disk when they are created, modified, moved or deleted
+    """
+
+    def __init__(self, src_path: str, dest_path: str, sshpass: str):
+        """
+
+        Args:
+            src_path (str): The source path to sync
+            dest_path (str): The destination path to sync
+            sshpass (str): The password to ssh
+        """
+        super().__init__()
+        self.src_path = src_path
+        self.dest_path = dest_path
+        self.sshpass = sshpass
+
+    def on_any_event(self, event):
+        """
+        Sync the files to disk when they are created, modified, moved or deleted
+        Args:
+            event:
+
+        Returns:
+
+        """
+        if event.is_directory:
+            return None
+        else:
+            if self.sshpass:
+                subprocess.call(
+                    [
+                        "sshpass",
+                        "-p",
+                        self.sshpass,
+                        "rsync",
+                        "-avz",
+                        "--delete",
+                        self.src_path,
+                        self.dest_path,
+                    ]
+                )
+            else:
+                # we can set up the authentication first, then we can use the rsync command
+                subprocess.call(
+                    ["rsync", "-avz", "--delete", self.src_path, self.dest_path]
+                )
+
+
__init__(src_path, dest_path, sshpass)

Parameters:

    Name       Type  Description                    Default
    src_path   str   The source path to sync        required
    dest_path  str   The destination path to sync   required
    sshpass    str   The password to ssh            required

Source code in Client/Listener/storage.py, lines 29-40 (see the full class listing above).


on_any_event(event)

Sync the files to disk when they are created, modified, moved or deleted.

Args:
    event: the filesystem event reported by watchdog

Source code in Client/Listener/storage.py, lines 42-71 (see the full class listing above).
S3SyncHandler

Bases: FileSystemEventHandler

Sync the files to s3 when they are created, modified, moved or deleted.

Source code in Client/Listener/storage.py (lines 74-110):
class S3SyncHandler(FileSystemEventHandler):
+    """
+    Sync the files to s3 when they are created, modified, moved or deleted
+    """
+
+    def __init__(self, home_id: int, s3_client):
+        super().__init__()
+        self.home_id = home_id
+        self.s3_client = s3_client
+
+    def on_any_event(self, event):
+        if event.is_directory:
+            return None
+
+        elif event.event_type in ("created", "modified", "moved", "deleted"):
+            # print(f"Event type: {event.event_type} - Path: {event.src_path}")
+            # only process .avi and .wav files
+
+            if event.src_path.split("/")[-1].split(".")[-1] not in [
+                "mp4",
+                "wav",
+                "mp3",
+                "jpg",
+                "jpeg",
+                "png",
+            ]:
+                return None
+            try:
+                self.s3_client.upload_file(
+                    event.src_path,
+                    S3_BUCKET,
+                    f"Listener/{event.src_path.split(DATA_DIR.as_posix())[1].strip('/')}",
+                )
+                logger.info(f"Uploaded file to s3: {event.src_path}")
+                # logger.info(f"Listener/{event.src_path.split(DATA_DIR.as_posix())[1].strip('/')}")
+            except Exception as e:
+                logger.error(f"Error uploading file to s3: {e}")
+
+
+ + + +
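A minimal usage sketch, mirroring StorageHandler.process_s3 below. It assumes boto3 credentials are already configured and that DATA_DIR is the Listener data directory constant from this module; the home id is illustrative:

import boto3
from watchdog.observers import Observer

# Watch the data directory and upload new media files to S3.
s3_handler = S3SyncHandler(home_id=1, s3_client=boto3.client("s3"))
observer = Observer()
observer.schedule(s3_handler, str(DATA_DIR), recursive=True)
observer.start()
# keep the process alive (e.g. a sleep loop), as in StorageHandler.process_s3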
+ + + + + + + + + + + +
+ +
+ +
+ +
+ + + +

+ StorageHandler + + +

+ + +
+ + +
+ Source code in Client/Listener/storage.py +
class StorageHandler:
+    def __init__(
+        self,
+        api_domain: str = "",
+        token: str = "",
+        home_id: int = None,
+        dest_dir: Optional[str] = None,
+        dest_password: Optional[str] = None,
+    ):
+        """
+        Args:
+            api_domain (str): the api domain
+            token (str): the api token
+            home_id (int): the home id
+            dest_dir (str): the destination directory to sync, like
+            dest_password (str): the destination password to sync
+        """
+        self.home_id = home_id
+        self.dest_dir = dest_dir
+        self.dest_password = dest_password
+        self.api = API(domain=api_domain, token=token, home_id=home_id)
+        self.storage_solution = self.api.get_storage_solution()
+
+    def process(self):
+        if self.storage_solution == STORAGE_SOLUTION_VOLUME:
+            logger.info("No need to process files")
+            return
+
+        if self.storage_solution == STORAGE_SOLUTION_S3:
+            self.process_s3()
+
+        if self.storage_solution == STORAGE_SOLUTION_LOCAL:
+            self.process_local_network()
+
+        if self.storage_solution == STORAGE_SOLUTION_API:
+            self.process_api()
+
+    def process_s3(self):
+        observer = Observer()
+        s3_handler = S3SyncHandler(self.home_id, s3_client=boto3.client("s3"))
+        observer.schedule(s3_handler, str(DATA_DIR), recursive=True)
+        observer.start()
+        try:
+            while True:
+                time.sleep(1)
+        except KeyboardInterrupt:
+            observer.stop()
+        observer.join()
+
+    def process_local_network(self):
+        observer = Observer()
+        if not self.dest_dir:
+            logger.error("dest_dir is required for local network sync")
+            return
+        local_handler = LocalSyncHandler(
+            src_path=str(DATA_DIR),
+            dest_path=self.dest_dir,
+            sshpass=self.dest_password,
+        )
+        observer.schedule(local_handler, str(DATA_DIR), recursive=True)
+        observer.start()
+        try:
+            while True:
+                time.sleep(1)
+        except KeyboardInterrupt:
+            observer.stop()
+        observer.join()
+
+    def process_api(self):
+        observer = Observer()
+        api_handler = APISyncHandler(self.home_id, self.api)
+        observer.schedule(api_handler, str(DATA_DIR), recursive=True)
+        observer.start()
+        try:
+            while True:
+                time.sleep(1)
+        except KeyboardInterrupt:
+            observer.stop()
+        observer.join()
+
+
+ + + +
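A minimal usage sketch (the domain, token, home id, destination and password below are placeholders; the storage solution is resolved from the API and the matching sync loop is started):

handler = StorageHandler(
    api_domain="http://localhost:8000",  # placeholder API domain
    token="your-api-token",              # placeholder token
    home_id=1,                           # placeholder home id
    dest_dir="user@192.168.1.10:/srv/openomni_data/",  # only needed for local network sync
    dest_password="your-ssh-password",
)
# Starts the matching sync loop; for the s3/local/api solutions this blocks until interrupted.
handler.process()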
+ + + + + + + + + +
+ + +

+ __init__(api_domain='', token='', home_id=None, dest_dir=None, dest_password=None) + +

+ + +
+ + + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
api_domain + str + +
+

the api domain

+
+
+ '' +
token + str + +
+

the api token

+
+
+ '' +
home_id + int + +
+

the home id

+
+
+ None +
dest_dir + str + +
+

the destination directory to sync, like

+
+
+ None +
dest_password + str + +
+

the destination password to sync

+
+
+ None +
+ +
+ Source code in Client/Listener/storage.py +
def __init__(
+    self,
+    api_domain: str = "",
+    token: str = "",
+    home_id: int = None,
+    dest_dir: Optional[str] = None,
+    dest_password: Optional[str] = None,
+):
+    """
+    Args:
+        api_domain (str): the api domain
+        token (str): the api token
+        home_id (int): the home id
+        dest_dir (str): the destination directory to sync, like
+        dest_password (str): the destination password to sync
+    """
+    self.home_id = home_id
+    self.dest_dir = dest_dir
+    self.dest_password = dest_password
+    self.api = API(domain=api_domain, token=token, home_id=home_id)
+    self.storage_solution = self.api.get_storage_solution()
+
+
+
+ +
+ + + +
+ +
+ +
+ + + + +
+ +
+ +
+ + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/Sources/Client/Listener/utils/index.html b/Sources/Client/Listener/utils/index.html new file mode 100644 index 00000000..c63ea0d3 --- /dev/null +++ b/Sources/Client/Listener/utils/index.html @@ -0,0 +1,5131 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Utils - OpenOmni Framework by AI4WA + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Utils

+ +
+ + + + +
+ + + +
+ + + + + + + + +
+ + + +

+ timer + + +

+ + +
+ + +

util function used to log the time taken by a part of program

+ +
+ Source code in Client/Listener/utils.py +
class timer:
+    """
+    util function used to log the time taken by a part of program
+    """
+
+    def __init__(self, logger: Logger, message: str):
+        """
+        init the timer
+
+        Args:
+            logger: Logger
+                the logger to log the message
+            message: str
+                the message to log
+        """
+        self.message = message
+        self.logger = logger
+        self.start = 0
+        self.duration = 0
+        self.sub_timers = []
+
+    def __enter__(self):
+        """
+        context enter to start write this
+        """
+        self.start = time.time()
+        self.logger.info("Starting %s" % self.message)
+        return self
+
+    def __exit__(self, context, value, traceback):
+        """
+        context exit will write this
+        """
+        self.duration = time.time() - self.start
+        self.logger.info(f"Finished {self.message}, that took {self.duration:.3f}")
+
+
+ + + +
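A minimal usage sketch of the context manager (the logger name and message are illustrative; get_logger is the helper defined in the same utils module):

import time

logger = get_logger("listener")
with timer(logger, "load audio"):
    time.sleep(0.5)  # stand-in for the real work being timed; duration is logged on exit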
+ + + + + + + + + +
+ + +

+ __enter__() + +

+ + +
+ +

context enter to start write this

+ +
+ Source code in Client/Listener/utils.py +
def __enter__(self):
+    """
+    context enter to start write this
+    """
+    self.start = time.time()
+    self.logger.info("Starting %s" % self.message)
+    return self
+
+
+
+ +
+ +
+ + +

+ __exit__(context, value, traceback) + +

+ + +
+ +

context exit will write this

+ +
+ Source code in Client/Listener/utils.py +
def __exit__(self, context, value, traceback):
+    """
+    context exit will write this
+    """
+    self.duration = time.time() - self.start
+    self.logger.info(f"Finished {self.message}, that took {self.duration:.3f}")
+
+
+
+ +
+ +
+ + +

+ __init__(logger, message) + +

+ + +
+ +

init the timer

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
logger + Logger + +
+

Logger +the logger to log the message

+
+
+ required +
message + str + +
+

str +the message to log

+
+
+ required +
+ +
+ Source code in Client/Listener/utils.py +
def __init__(self, logger: Logger, message: str):
+    """
+    init the timer
+
+    Args:
+        logger: Logger
+            the logger to log the message
+        message: str
+            the message to log
+    """
+    self.message = message
+    self.logger = logger
+    self.start = 0
+    self.duration = 0
+    self.sub_timers = []
+
+
+
+ +
+ + + +
+ +
+ +
+ + +
+ + +

+ get_logger(logger_name=None, stream=True) + +

+ + +
+ +

init the logger, give it proper format, log them both in terminal stream and file

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
logger_name + Optional[str] + +
+

str +the name of the logger

+
+
+ None +
stream + bool + +
+

bool +whether to log in the terminal stream

+
+
+ True +
+ +
+ Source code in Client/Listener/utils.py +
def get_logger(logger_name: Optional[str] = None, stream: bool = True):
+    """
+    init the logger, give it proper format, log them both in terminal stream and file
+
+    Args:
+        logger_name: str
+            the name of the logger
+        stream: bool
+            whether to log in the terminal stream
+    """
+    logging.basicConfig(
+        format="%(name)s: %(asctime)s,%(msecs)d %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s",
+        datefmt="%Y-%m-%d:%H:%M:%S",
+        level=logging.INFO,
+    )
+
+    logger = logging.getLogger(logger_name)
+    logger.setLevel(logging.INFO)
+    logger.propagate = False
+    formatter = logging.Formatter(
+        "CLIENT: %(name)s | %(asctime)s,%(msecs)d %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s",
+    )
+    if not logger.hasHandlers() and stream:
+        stdout_handler = logging.StreamHandler()
+        stdout_handler.setFormatter(formatter)
+        stdout_handler.setLevel(logging.INFO)
+        logger.addHandler(stdout_handler)
+
+    return logger
+
+
+
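A minimal usage sketch (the logger name is illustrative; messages are emitted with the CLIENT-prefixed format shown above):

logger = get_logger("audio_acquire")
logger.info("listener started")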
+ +
+ + + +
+ +
+ +
+ + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/Sources/Client/Listener/videos_acquire/index.html b/Sources/Client/Listener/videos_acquire/index.html new file mode 100644 index 00000000..4044dc86 --- /dev/null +++ b/Sources/Client/Listener/videos_acquire/index.html @@ -0,0 +1,5296 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Video - OpenOmni Framework by AI4WA + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Video

+ +
+ + + + +
+ + + +
+ + + + + + + +
+ + + +

+ logger = get_logger('video_acquire') + + + module-attribute + + +

+ + +
+ +

PER_LENGTH = 1800 # 30 minutes

+

the screen width and height

+

WIDTH = 640
HEIGHT = 480
FPS = 24.0

+
+ +
+ + +
+ + + +

+ VideoAcquire + + +

+ + +
+ + +
+ Source code in Client/Listener/videos_acquire.py +
class VideoAcquire:
+    def __init__(
+        self,
+        width=WIDTH,
+        height=HEIGHT,
+        fps=FPS,
+        per_video_length=PER_LENGTH,
+        api_domain="",
+        token="",
+        home_id: int = None,
+    ):
+        """
+        init the video acquire
+        Args:
+            width: (int) the width of the video
+            height (int): the height of the video
+            fps (float): the frame per second
+            per_video_length (int): the length of the video
+            api_domain (str): the domain of the api
+            token (str): the token of the api
+            home_id (int): the home id
+        """
+        self.uid = str(uuid.uuid4())
+        self.data_dir = DATA_DIR / "videos" / self.uid  # the data dir
+        self.data_dir.mkdir(parents=True, exist_ok=True)
+        self.width = width  # the width and height of the video
+        self.height = height  # the width and height of the video
+        self.fps = fps  # frame per second
+        self.per_video_length = per_video_length  # the length of the video
+        self.api = API(domain=api_domain, token=token, home_id=home_id)
+        self.api.register_device()
+
+    def record(self):
+        """
+        start to record the video
+        """
+        segment_images = 60
+        seconds = 0
+        minutes = 1
+
+        # init the recording
+        cap = cv2.VideoCapture(0)
+        # set the width and height
+        cap.set(cv2.CAP_PROP_FRAME_WIDTH, self.width)
+        cap.set(cv2.CAP_PROP_FRAME_HEIGHT, self.height)
+        # set the frame per second
+        cap.set(cv2.CAP_PROP_FPS, 24.0)
+        # use the XVID codec
+        fourcc = cv2.VideoWriter_fourcc(*"avc1")  # noqa
+
+        cap_fps = cap.get(5)  # get the fps of the camera
+        logger.info(f"the fps of the camera is {cap_fps}")
+
+        start_time = datetime.now()
+        filename = self.data_dir / (start_time.strftime("%Y-%m-%d_%H-%M-%S") + ".mp4")
+
+        out = cv2.VideoWriter(
+            filename.as_posix(), fourcc, self.fps, (self.width, self.height)
+        )  # noqa
+        logger.info("start to record the video")
+        flag = True
+        while flag:
+            try:
+                if (datetime.now() - start_time).seconds >= self.per_video_length:
+                    # stop the recording and save the video when the time is up
+                    logger.info(f"the recording is finished, saved to file: {filename}")
+                    out.release()
+                    # TODO: post the video to the server
+                    self.api.post_video(
+                        self.uid,
+                        filename.as_posix().split("/")[-1],
+                        start_time=start_time,
+                        end_time=datetime.now(),
+                    )
+                    # resume the recording
+                    start_time = datetime.now()
+                    filename = self.data_dir / (
+                        start_time.strftime("%Y-%m-%d_%H-%M-%S") + ".mp4"
+                    )
+                    out = cv2.VideoWriter(
+                        filename.as_posix(), fourcc, FPS, (self.width, self.height)
+                    )  # noqa
+                else:
+                    # read the frame
+                    logger.debug("Try to process the frame")
+                    ret, frame = cap.read()
+                    if ret:
+                        logger.debug("write the frame")
+                        out.write(frame)
+                        # cv2.imshow("frame", frame)
+                        if seconds == segment_images:
+                            logger.info("begin the next frame segment")
+                            seconds = 0
+                            minutes += 1
+                        if seconds < segment_images:
+                            image_dir = (
+                                self.data_dir
+                                / "frames"
+                                / f"{datetime.now().strftime('%Y-%m-%d_%H-%M')}"
+                            )
+                            image_dir.mkdir(parents=True, exist_ok=True)
+                            cv2.imwrite(
+                                (image_dir / f"{seconds}.jpg").as_posix(), frame
+                            )
+                            seconds += 1
+                if cv2.waitKey(1) == ord("q"):
+                    break
+            except KeyboardInterrupt:
+                break
+        cap.release()
+
+
+ + + +
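A minimal usage sketch (domain, token and home id are placeholders; record() keeps capturing from the default camera until interrupted):

acquire = VideoAcquire(
    api_domain="http://localhost:8000",  # placeholder
    token="your-api-token",              # placeholder
    home_id=1,                           # placeholder
)
acquire.record()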
+ + + + + + + + + +
+ + +

+ __init__(width=WIDTH, height=HEIGHT, fps=FPS, per_video_length=PER_LENGTH, api_domain='', token='', home_id=None) + +

+ + +
+ +

init the video acquire

Args:
    width (int): the width of the video
    height (int): the height of the video
    fps (float): the frames per second
    per_video_length (int): the length of the video
    api_domain (str): the domain of the api
    token (str): the token of the api
    home_id (int): the home id

+ +
+ Source code in Client/Listener/videos_acquire.py +
def __init__(
+    self,
+    width=WIDTH,
+    height=HEIGHT,
+    fps=FPS,
+    per_video_length=PER_LENGTH,
+    api_domain="",
+    token="",
+    home_id: int = None,
+):
+    """
+    init the video acquire
+    Args:
+        width: (int) the width of the video
+        height (int): the height of the video
+        fps (float): the frame per second
+        per_video_length (int): the length of the video
+        api_domain (str): the domain of the api
+        token (str): the token of the api
+        home_id (int): the home id
+    """
+    self.uid = str(uuid.uuid4())
+    self.data_dir = DATA_DIR / "videos" / self.uid  # the data dir
+    self.data_dir.mkdir(parents=True, exist_ok=True)
+    self.width = width  # the width and height of the video
+    self.height = height  # the width and height of the video
+    self.fps = fps  # frame per second
+    self.per_video_length = per_video_length  # the length of the video
+    self.api = API(domain=api_domain, token=token, home_id=home_id)
+    self.api.register_device()
+
+
+
+ +
+ +
+ + +

+ record() + +

+ + +
+ +

start to record the video

+ +
+ Source code in Client/Listener/videos_acquire.py +
def record(self):
+    """
+    start to record the video
+    """
+    segment_images = 60
+    seconds = 0
+    minutes = 1
+
+    # init the recording
+    cap = cv2.VideoCapture(0)
+    # set the width and height
+    cap.set(cv2.CAP_PROP_FRAME_WIDTH, self.width)
+    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, self.height)
+    # set the frame per second
+    cap.set(cv2.CAP_PROP_FPS, 24.0)
+    # use the XVID codec
+    fourcc = cv2.VideoWriter_fourcc(*"avc1")  # noqa
+
+    cap_fps = cap.get(5)  # get the fps of the camera
+    logger.info(f"the fps of the camera is {cap_fps}")
+
+    start_time = datetime.now()
+    filename = self.data_dir / (start_time.strftime("%Y-%m-%d_%H-%M-%S") + ".mp4")
+
+    out = cv2.VideoWriter(
+        filename.as_posix(), fourcc, self.fps, (self.width, self.height)
+    )  # noqa
+    logger.info("start to record the video")
+    flag = True
+    while flag:
+        try:
+            if (datetime.now() - start_time).seconds >= self.per_video_length:
+                # stop the recording and save the video when the time is up
+                logger.info(f"the recording is finished, saved to file: {filename}")
+                out.release()
+                # TODO: post the video to the server
+                self.api.post_video(
+                    self.uid,
+                    filename.as_posix().split("/")[-1],
+                    start_time=start_time,
+                    end_time=datetime.now(),
+                )
+                # resume the recording
+                start_time = datetime.now()
+                filename = self.data_dir / (
+                    start_time.strftime("%Y-%m-%d_%H-%M-%S") + ".mp4"
+                )
+                out = cv2.VideoWriter(
+                    filename.as_posix(), fourcc, FPS, (self.width, self.height)
+                )  # noqa
+            else:
+                # read the frame
+                logger.debug("Try to process the frame")
+                ret, frame = cap.read()
+                if ret:
+                    logger.debug("write the frame")
+                    out.write(frame)
+                    # cv2.imshow("frame", frame)
+                    if seconds == segment_images:
+                        logger.info("begin the next frame segment")
+                        seconds = 0
+                        minutes += 1
+                    if seconds < segment_images:
+                        image_dir = (
+                            self.data_dir
+                            / "frames"
+                            / f"{datetime.now().strftime('%Y-%m-%d_%H-%M')}"
+                        )
+                        image_dir.mkdir(parents=True, exist_ok=True)
+                        cv2.imwrite(
+                            (image_dir / f"{seconds}.jpg").as_posix(), frame
+                        )
+                        seconds += 1
+            if cv2.waitKey(1) == ord("q"):
+                break
+        except KeyboardInterrupt:
+            break
+    cap.release()
+
+
+
+ +
+ + + +
+ +
+ +
+ + + + +
+ +
+ +
+ + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/Sources/Client/Responder/api/index.html b/Sources/Client/Responder/api/index.html new file mode 100644 index 00000000..0fa0e91d --- /dev/null +++ b/Sources/Client/Responder/api/index.html @@ -0,0 +1,5093 @@ + + + + + + + + + + + + + + + + + + + + + + + + + API - OpenOmni Framework by AI4WA + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

API

+ +
+ + + + +
+ + + +
+ + + + + + + + +
+ + + +

+ API + + +

+ + +
+ + +
+ Source code in Client/Responder/api.py +
class API:
+    def __init__(
+        self,
+        domain: str = API_DOMAIN,
+        token: str = "",
+        home_id: int = None,
+    ):
+        """
+        init the api
+        Args:
+            domain (str): the domain of the api
+            token (str): the token of the api
+            home_id (int): the home id
+        """
+        self.domain = domain
+        self.token = token
+        self.mac_address = get_mac_address()
+        self.home_id = home_id
+
+    def register_device(
+        self,
+        device_name: Optional[str] = None,
+        device_type: Optional[str] = None,
+        description: Optional[str] = None,
+    ):
+        """
+        register the device
+        Args:
+            device_name (Optional[str]): the device name
+            device_type (Optional[str]): the device type
+            description (Optional[str]): the description of the device
+
+        Returns:
+
+        """
+        url = f"{self.domain}/hardware/register/"
+
+        r = requests.post(
+            url,
+            data={
+                "home": self.home_id,
+                "mac_address": self.mac_address,
+                "device_name": device_name,
+                "device_type": device_type,
+                "description": description,
+            },
+            headers={"Authorization": f"Token {self.token}"},
+        )
+        logger.info(url)
+        logger.info(f"POST {url} {r.status_code}")
+
+    def get_spoken_speech(self):
+        """
+        Call the API to get the speech to play
+        Returns:
+
+        """
+        url = f"{self.domain}/hardware/speech/?home_id={self.home_id}"
+        logger.info(url)
+        r = requests.get(
+            url, headers={"Authorization": f"Token {self.token}"}, timeout=30
+        )
+
+        logger.info(f"get {url} {r.status_code}")
+        # logger.info(r.text)
+        if r.status_code != 200:
+            return []
+        return r.json()
+
+
+ + + +
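A minimal usage sketch (domain, token and home id are placeholders; get_spoken_speech is assumed to return a list of speech records, since it returns the decoded JSON response or an empty list):

api = API(domain="http://localhost:8000", token="your-api-token", home_id=1)
api.register_device(device_name="speaker", device_type="audio")  # optional metadata
for speech in api.get_spoken_speech():
    print(speech)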
+ + + + + + + + + +
+ + +

+ __init__(domain=API_DOMAIN, token='', home_id=None) + +

+ + +
+ +

init the api +Args: + domain (str): the domain of the api + token (str): the token of the api + home_id (int): the home id

+ +
+ Source code in Client/Responder/api.py +
def __init__(
+    self,
+    domain: str = API_DOMAIN,
+    token: str = "",
+    home_id: int = None,
+):
+    """
+    init the api
+    Args:
+        domain (str): the domain of the api
+        token (str): the token of the api
+        home_id (int): the home id
+    """
+    self.domain = domain
+    self.token = token
+    self.mac_address = get_mac_address()
+    self.home_id = home_id
+
+
+
+ +
+ +
+ + +

+ get_spoken_speech() + +

+ + +
+ +

Call the API to get the speech to play +Returns:

+ +
+ Source code in Client/Responder/api.py +
def get_spoken_speech(self):
+    """
+    Call the API to get the speech to play
+    Returns:
+
+    """
+    url = f"{self.domain}/hardware/speech/?home_id={self.home_id}"
+    logger.info(url)
+    r = requests.get(
+        url, headers={"Authorization": f"Token {self.token}"}, timeout=30
+    )
+
+    logger.info(f"get {url} {r.status_code}")
+    # logger.info(r.text)
+    if r.status_code != 200:
+        return []
+    return r.json()
+
+
+
+ +
+ +
+ + +

+ register_device(device_name=None, device_type=None, description=None) + +

+ + +
+ +

register the device +Args: + device_name (Optional[str]): the device name + device_type (Optional[str]): the device type + description (Optional[str]): the description of the device

+

Returns:

+ +
+ Source code in Client/Responder/api.py +
def register_device(
+    self,
+    device_name: Optional[str] = None,
+    device_type: Optional[str] = None,
+    description: Optional[str] = None,
+):
+    """
+    register the device
+    Args:
+        device_name (Optional[str]): the device name
+        device_type (Optional[str]): the device type
+        description (Optional[str]): the description of the device
+
+    Returns:
+
+    """
+    url = f"{self.domain}/hardware/register/"
+
+    r = requests.post(
+        url,
+        data={
+            "home": self.home_id,
+            "mac_address": self.mac_address,
+            "device_name": device_name,
+            "device_type": device_type,
+            "description": description,
+        },
+        headers={"Authorization": f"Token {self.token}"},
+    )
+    logger.info(url)
+    logger.info(f"POST {url} {r.status_code}")
+
+
+
+ +
+ + + +
+ +
+ +
+ + + + +
+ +
+ +
+ + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/Sources/Client/Responder/constants/index.html b/Sources/Client/Responder/constants/index.html new file mode 100644 index 00000000..e1af0bf1 --- /dev/null +++ b/Sources/Client/Responder/constants/index.html @@ -0,0 +1,4681 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Constants - OpenOmni Framework by AI4WA + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Constants

+ +
+ + + + +
+ + + +
+ + + + + + + +
+ + + +

+ API_DOMAIN = 'http://localhost:8000' + + + module-attribute + + +

+ + +
+ +
# get parent of current folder as root
+ROOT_PATH = os.path.dirname(os.path.abspath(__file__))
+DATA_DIR = Path(ROOT_PATH) / "data"
+
+DATA_DIR.mkdir(parents=True, exist_ok=True)
+
+API_DOMAIN = "http://localhost:8000"
+
+
+ +
+ + + + + +
+ +
+ +
+ + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/Sources/Client/Responder/play_speech/index.html b/Sources/Client/Responder/play_speech/index.html new file mode 100644 index 00000000..87a31b8a --- /dev/null +++ b/Sources/Client/Responder/play_speech/index.html @@ -0,0 +1,5080 @@ + + + + + + + + + + + + + + + + + + + + + + + + + PlaySpeech - OpenOmni Framework by AI4WA + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

PlaySpeech

+ +
+ + + + +
+ + + +
+ + + + + + + + +
+ + + +

+ PlaySpeech + + +

+ + +
+ + +
+ Source code in Client/Responder/play_speech.py +
class PlaySpeech:
+    @staticmethod
+    def text_to_speech_and_play(content: str):
+        """
+        Convert text to speech and play
+        Args:
+            content (str): The content to be converted to speech
+
+        Returns:
+
+        """
+        # Convert text to speech
+        with timer(logger, "Text to speech"):
+            tts = gTTS(text=content, lang="en")
+        mp3_fp = io.BytesIO()
+        tts.write_to_fp(mp3_fp)
+        mp3_fp.seek(0)
+        with timer(logger, "Load audio"):
+            # Load the audio into pydub
+            audio = AudioSegment.from_file(mp3_fp, format="mp3")
+
+        with timer(logger, "Play audio"):
+            # Play the audio
+            play(audio)
+
+    @staticmethod
+    def play_audio_url(url: str):
+        """
+        Play audio file from the given
+        Args:
+            url (str): The URL of the audio file
+
+        Returns:
+
+        """
+        response = requests.get(url)
+        response.raise_for_status()  # This will raise an exception for HTTP errors
+        with NamedTemporaryFile(delete=True, suffix=".mp3") as temp_file:
+            temp_file.write(response.content)
+            temp_file.flush()  # Make sure all data is written to the file
+
+            # Load the audio into pydub
+            audio = AudioSegment.from_file(temp_file.name, format="mp3")
+
+            # Play the audio
+            play(audio)
+
+    @staticmethod
+    def play_audio_file(file_path: Path):
+        """
+        Play audio file from the given
+        Args:
+            file_path (Path): The path of the audio file
+
+        Returns:
+
+        """
+        # Load the audio into pydub
+        audio = AudioSegment.from_file(file_path, format="mp3")
+
+        # Play the audio
+        play(audio)
+
+
+ + + +
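A minimal usage sketch of the three static helpers (the URL and file path are placeholders; gTTS, pydub and an audio backend such as ffmpeg are assumed to be available):

from pathlib import Path

PlaySpeech.text_to_speech_and_play("Hello from the responder")
PlaySpeech.play_audio_url("http://localhost:8000/media/tts/example.mp3")  # placeholder URL
PlaySpeech.play_audio_file(Path("data/tts/example.mp3"))  # placeholder path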
+ + + + + + + + + +
+ + +

+ play_audio_file(file_path) + + + staticmethod + + +

+ + +
+ +

Play audio file from the given +Args: + file_path (Path): The path of the audio file

+

Returns:

+ +
+ Source code in Client/Responder/play_speech.py +
@staticmethod
+def play_audio_file(file_path: Path):
+    """
+    Play audio file from the given
+    Args:
+        file_path (Path): The path of the audio file
+
+    Returns:
+
+    """
+    # Load the audio into pydub
+    audio = AudioSegment.from_file(file_path, format="mp3")
+
+    # Play the audio
+    play(audio)
+
+
+
+ +
+ +
+ + +

+ play_audio_url(url) + + + staticmethod + + +

+ + +
+ +

Play audio file from the given +Args: + url (str): The URL of the audio file

+

Returns:

+ +
+ Source code in Client/Responder/play_speech.py +
@staticmethod
+def play_audio_url(url: str):
+    """
+    Play audio file from the given
+    Args:
+        url (str): The URL of the audio file
+
+    Returns:
+
+    """
+    response = requests.get(url)
+    response.raise_for_status()  # This will raise an exception for HTTP errors
+    with NamedTemporaryFile(delete=True, suffix=".mp3") as temp_file:
+        temp_file.write(response.content)
+        temp_file.flush()  # Make sure all data is written to the file
+
+        # Load the audio into pydub
+        audio = AudioSegment.from_file(temp_file.name, format="mp3")
+
+        # Play the audio
+        play(audio)
+
+
+
+ +
+ +
+ + +

+ text_to_speech_and_play(content) + + + staticmethod + + +

+ + +
+ +

Convert text to speech and play +Args: + content (str): The content to be converted to speech

+

Returns:

+ +
+ Source code in Client/Responder/play_speech.py +
@staticmethod
+def text_to_speech_and_play(content: str):
+    """
+    Convert text to speech and play
+    Args:
+        content (str): The content to be converted to speech
+
+    Returns:
+
+    """
+    # Convert text to speech
+    with timer(logger, "Text to speech"):
+        tts = gTTS(text=content, lang="en")
+    mp3_fp = io.BytesIO()
+    tts.write_to_fp(mp3_fp)
+    mp3_fp.seek(0)
+    with timer(logger, "Load audio"):
+        # Load the audio into pydub
+        audio = AudioSegment.from_file(mp3_fp, format="mp3")
+
+    with timer(logger, "Play audio"):
+        # Play the audio
+        play(audio)
+
+
+
+ +
+ + + +
+ +
+ +
+ + + + +
+ +
+ +
+ + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/Sources/Client/Responder/setup/index.html b/Sources/Client/Responder/setup/index.html new file mode 100644 index 00000000..a34a93f4 --- /dev/null +++ b/Sources/Client/Responder/setup/index.html @@ -0,0 +1,4575 @@ + + + + + + + + + + + + + + + + + + + + + Setup - OpenOmni Framework by AI4WA + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Setup

+ +
+ + + + +
+ + + +
+ + + + + + + + + + + +
+ +
+ +
+ + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/Sources/Client/Responder/utils/index.html b/Sources/Client/Responder/utils/index.html new file mode 100644 index 00000000..b89f5716 --- /dev/null +++ b/Sources/Client/Responder/utils/index.html @@ -0,0 +1,5212 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Utils - OpenOmni Framework by AI4WA + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Utils

+ +
+ + + + +
+ + + +
+ + + + + + + + +
+ + + +

+ timer + + +

+ + +
+ + +

util function used to log the time taken by a part of program

+ +
+ Source code in Client/Responder/utils.py +
class timer:
+    """
+    util function used to log the time taken by a part of program
+    """
+
+    def __init__(self, logger: Logger, message: str):
+        """
+        init the timer
+
+        Args:
+            logger (Logger): the logger
+            message (str): the message to be logged
+        """
+        self.message = message
+        self.logger = logger
+        self.start = 0
+        self.duration = 0
+        self.sub_timers = []
+
+    def __enter__(self):
+        """
+        context enter to start write this
+        """
+        self.start = time.time()
+        self.logger.info("Starting %s" % self.message)
+        return self
+
+    def __exit__(
+        self,
+        context: Optional[Type[BaseException]],
+        value: Optional[BaseException],
+        traceback: Optional[TracebackType],
+    ):
+        """
+        context exit will write this
+
+        Args:
+            context (Optional[Type[BaseException]]): the context
+            value (Optional[BaseException]): the value
+            traceback (Optional[TracebackType]): the traceback
+        """
+        self.duration = time.time() - self.start
+        self.logger.info(f"Finished {self.message}, that took {self.duration:.3f}")
+
+
+ + + +
+ + + + + + + + + +
+ + +

+ __enter__() + +

+ + +
+ +

context enter to start write this

+ +
+ Source code in Client/Responder/utils.py +
def __enter__(self):
+    """
+    context enter to start write this
+    """
+    self.start = time.time()
+    self.logger.info("Starting %s" % self.message)
+    return self
+
+
+
+ +
+ +
+ + +

+ __exit__(context, value, traceback) + +

+ + +
+ +

context exit will write this

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
context + Optional[Type[BaseException]] + +
+

the context

+
+
+ required +
value + Optional[BaseException] + +
+

the value

+
+
+ required +
traceback + Optional[TracebackType] + +
+

the traceback

+
+
+ required +
+ +
+ Source code in Client/Responder/utils.py +
def __exit__(
+    self,
+    context: Optional[Type[BaseException]],
+    value: Optional[BaseException],
+    traceback: Optional[TracebackType],
+):
+    """
+    context exit will write this
+
+    Args:
+        context (Optional[Type[BaseException]]): the context
+        value (Optional[BaseException]): the value
+        traceback (Optional[TracebackType]): the traceback
+    """
+    self.duration = time.time() - self.start
+    self.logger.info(f"Finished {self.message}, that took {self.duration:.3f}")
+
+
+
+ +
+ +
+ + +

+ __init__(logger, message) + +

+ + +
+ +

init the timer

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
logger + Logger + +
+

the logger

+
+
+ required +
message + str + +
+

the message to be logged

+
+
+ required +
+ +
+ Source code in Client/Responder/utils.py +
def __init__(self, logger: Logger, message: str):
+    """
+    init the timer
+
+    Args:
+        logger (Logger): the logger
+        message (str): the message to be logged
+    """
+    self.message = message
+    self.logger = logger
+    self.start = 0
+    self.duration = 0
+    self.sub_timers = []
+
+
+
+ +
+ + + +
+ +
+ +
+ + +
+ + +

+ get_logger(logger_name=None, stream=True) + +

+ + +
+ +

init the logger, give it proper format, log them both in terminal stream and file

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
logger_name + Optional[str] + +
+

the logger name

+
+
+ None +
stream + bool + +
+

whether to log in the terminal stream

+
+
+ True +
+ +
+ Source code in Client/Responder/utils.py +
def get_logger(logger_name: Optional[str] = None, stream: bool = True):
+    """
+    init the logger, give it proper format, log them both in terminal stream and file
+
+    Args:
+        logger_name (Optional[str]): the logger name
+        stream (bool): whether to log in the terminal stream
+    """
+    logging.basicConfig(
+        format="%(name)s: %(asctime)s,%(msecs)d %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s",
+        datefmt="%Y-%m-%d:%H:%M:%S",
+        level=logging.INFO,
+    )
+
+    logger = logging.getLogger(logger_name)
+    logger.setLevel(logging.INFO)
+    logger.propagate = False
+    formatter = logging.Formatter(
+        "CLIENT: %(name)s | %(asctime)s,%(msecs)d %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s",
+    )
+    if not logger.hasHandlers() and stream:
+        stdout_handler = logging.StreamHandler()
+        stdout_handler.setFormatter(formatter)
+        stdout_handler.setLevel(logging.INFO)
+        logger.addHandler(stdout_handler)
+
+    return logger
+
+
+
+ +
+ + + +
+ +
+ +
+ + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/Tutorial/annotation_customisation/index.html b/Tutorial/annotation_customisation/index.html new file mode 100644 index 00000000..c1cd2795 --- /dev/null +++ b/Tutorial/annotation_customisation/index.html @@ -0,0 +1,4852 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Customize Annotation - OpenOmni Framework by AI4WA + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Annotation customisation

+

Explanation

+

The annotation is built upon the Django Admin interface, which is a powerful tool for managing the data and is easy to customise.

+

This is one of the reasons why we chose Django as the backend framework: there is plenty of documentation, and many tools and packages are available if you need something more here.

+

Conversation Annotation

+

It is built upon the Django change_list template; the code that makes this happen is in API/hardware/admin.py and API/hardware/forms.py.

+

The class DataMultiModalConversationAdmin is where we implement it.

+

First we will look at the conversation model to see where we store the annotation data:

+
# API/hardware/models.py
+
+class DataMultiModalConversation(models.Model):
+    # ...
+    annotations = models.JSONField(
+        help_text="The annotations of the emotion detection",
+        null=True,
+        blank=True,
+        default=dict,
+    )
+
+    multi_turns_annotations = models.JSONField(
+        help_text="The annotations of the multi-turns",
+        null=True,
+        blank=True,
+        default=dict,
+    )
+    tags = TaggableManager(blank=True)
+
+

This means the annotations will be saved to these two JSON fields: annotations and multi_turns_annotations.

+

The saved JSON will follow this schema:

+
{
+  "1": {
+    // 1 is the user id
+    "annotation_speech2text": "your annotation",
+    "annotation_speech2text_score": 3
+    // from 0-5
+    // ...
+  }
+}
+
+

The annotation form fields are defined in:

+
# in API/hardware/forms.py
+class MultiModalAnnotationForm(forms.ModelForm):
+    annotation_speech2text = forms.CharField(
+        required=False,
+        widget=forms.Textarea(attrs={"rows": 1}),
+        help_text="Please provide your annotation for the speech-to-text task.",
+    )
+    annotation_speech2text_score = forms.IntegerField(
+        initial=0,
+        widget=forms.NumberInput(attrs={"min": 0, "max": 5}),
+        required=False,
+        help_text="Score for the speech-to-text results, score from 0 to 5.",
+    )
+    annotation_text_generation = forms.CharField(
+        required=False,
+        widget=forms.Textarea(attrs={"rows": 1}),
+        help_text="Please provide your annotation for the text generation task.",
+    )
+
+    annotation_text_generation_score = forms.IntegerField(
+        initial=0,
+        widget=forms.NumberInput(attrs={"min": 0, "max": 5}),
+        required=False,
+        help_text="Score for the text generation results, score from 0 to 5.",
+    )
+
+    annotation_text2speech_score = forms.IntegerField(
+        initial=0,
+        widget=forms.NumberInput(attrs={"min": 0, "max": 5}),
+        required=False,
+        help_text="Score for the text-to-speech results, score from 0 to 5.",
+    )
+
+    annotation_overall_score = forms.IntegerField(
+        initial=0,
+        widget=forms.NumberInput(attrs={"min": 0, "max": 5}),
+        required=False,
+        help_text="Overall score for this multi-modal task, score from 0 to 5.",
+    )
+
+    annotation_overall_comment = forms.CharField(
+        required=False,
+        widget=forms.Textarea(attrs={"rows": 1}),
+        help_text="Please provide your overall annotation for this multi-modal task.",
+    )
+
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+        if self.instance.annotations:
+            current_user_annotation = self.instance.annotations.get(
+                str(self.current_user.id), {}
+            )
+            for key, value in current_user_annotation.items():
+                if key in self.fields:
+                    self.fields[key].initial = value
+        if self.instance.multi_turns_annotations:
+            current_user_annotation = self.instance.multi_turns_annotations.get(
+                str(self.current_user.id), {}
+            )
+            for key, value in current_user_annotation.items():
+                if key in self.fields:
+                    self.fields[key].initial = value
+
+

These are the fields that show up in the change_list page, as shown below:

+

Annotation_metrics

+

The code inside the __init__ function is in charge of presenting the data you have already annotated back to you.

+

The annotation benchmark and detail pages analyse the data in the annotations and multi_turns_annotations fields.

+
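The exact aggregation lives in the benchmark code in the API module, but the idea can be sketched as follows (a simplified example assuming the per-user schema above; the function, queryset and field name are illustrative, not part of the codebase):

from statistics import mean

def average_annotation_score(conversations, field="annotation_overall_score"):
    """Average one annotation field across all annotators and conversations (sketch only)."""
    scores = []
    for conversation in conversations:
        for user_annotation in (conversation.annotations or {}).values():
            value = user_annotation.get(field)
            if value is not None:
                scores.append(value)
    return mean(scores) if scores else None

# e.g. average_annotation_score(DataMultiModalConversation.objects.all())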

If you want to add a customised annotation field, all you need to do is add a field to the MultiModalAnnotationForm.

+

Customised Component Annotation

+

For example, we have an emotion detection task which does not fit into the conversation model for annotation; it is more of an intermediate task and output for the whole pipeline, yet its quality is still very important.

+

So we also want to be able to annotate these types of tasks.

+

Especially during application development, we may introduce specialised tasks for different purposes.

+

In general, when annotating these types of tasks the context is still the input and output of the conversation, so when we design the model (using emotion detection as an example) we use a foreign key pointing to the conversation model.

+
class ContextEmotionDetection(models.Model):
+    multi_modal_conversation = models.ForeignKey(
+        DataMultiModalConversation,
+        on_delete=models.CASCADE,
+        related_name="emotion_detection",
+        null=True,
+        blank=True,
+    )
+    result = models.JSONField(
+        help_text="The emotion result of the text", null=True, blank=True, default=dict
+    )
+    logs = models.JSONField(
+        help_text="The logs of the emotion detection",
+        null=True,
+        blank=True,
+        default=dict,
+    )
+    created_at = models.DateTimeField(
+        auto_now_add=True, help_text="The created time of the emotion detection"
+    )
+    updated_at = models.DateTimeField(
+        auto_now=True, help_text="The updated time of the emotion detection"
+    )
+
+    annotations = models.JSONField(
+        help_text="The annotations of the emotion detection",
+        null=True,
+        blank=True,
+        default=dict,
+    )
+
+    class Meta:
+        verbose_name = "Context Emotion"
+        verbose_name_plural = "Context Emotions"
+
+

As in the conversation model, we also have an annotations field to store the annotation data.

+

The schema of the annotation data is the same as for the conversation model.

+

The annotation form is in API/hardware/forms.py:

+

+
+class MultiModalFKEmotionDetectionAnnotationForm(forms.ModelForm):
+    annotation_overall = forms.IntegerField(
+        initial=0,
+        help_text="Overall score for this emotion detection task, score from 0 to 5.",
+    )
+    annotation_overall.widget.attrs.update({"min": 0, "max": 5})
+
+    annotation_text_modality = forms.IntegerField(
+        initial=0, help_text="Score for text modality."
+    )
+    annotation_text_modality.widget.attrs.update({"min": 0, "max": 5})
+
+    annotation_audio_modality = forms.IntegerField(
+        initial=0, help_text="Score for audio modality."
+    )
+    annotation_audio_modality.widget.attrs.update({"min": 0, "max": 5})
+
+    annotation_video_modality = forms.IntegerField(
+        initial=0, help_text="Score for video modality."
+    )
+    annotation_video_modality.widget.attrs.update({"min": 0, "max": 5})
+
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+
+        if self.instance.annotations:
+            current_user_annotation = self.instance.annotations.get(
+                str(self.current_user.id), {}
+            )
+            for key, value in current_user_annotation.items():
+                if key in self.fields:
+                    self.fields[key].initial = value
+
+

The form is similar to the conversation annotation form, but the fields are different.

+

So if you want to implement one yourself, all you need to do is copy this pattern for your model, as shown in the sketch below.
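For illustration, a sketch of what that copy might look like for a hypothetical intent detection component; the model, form and field names below are made up for the example (they are not part of the codebase) and would live in API/hardware/models.py and API/hardware/forms.py respectively:

# models.py: a component model mirroring ContextEmotionDetection
class ContextIntentDetection(models.Model):
    multi_modal_conversation = models.ForeignKey(
        DataMultiModalConversation,
        on_delete=models.CASCADE,
        related_name="intent_detection",
        null=True,
        blank=True,
    )
    result = models.JSONField(null=True, blank=True, default=dict)
    annotations = models.JSONField(null=True, blank=True, default=dict)


# forms.py: its annotation form, mirroring MultiModalFKEmotionDetectionAnnotationForm
class MultiModalFKIntentDetectionAnnotationForm(forms.ModelForm):
    annotation_overall = forms.IntegerField(
        initial=0,
        help_text="Overall score for this intent detection task, score from 0 to 5.",
    )
    annotation_overall.widget.attrs.update({"min": 0, "max": 5})

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Pre-fill the form with the current user's existing annotation, if any.
        if self.instance.annotations:
            current_user_annotation = self.instance.annotations.get(
                str(self.current_user.id), {}
            )
            for key, value in current_user_annotation.items():
                if key in self.fields:
                    self.fields[key].initial = value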

+ + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/Tutorial/benchmark_and_annotation/index.html b/Tutorial/benchmark_and_annotation/index.html new file mode 100644 index 00000000..dedcc46c --- /dev/null +++ b/Tutorial/benchmark_and_annotation/index.html @@ -0,0 +1,4741 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Benchmark and Annotation - OpenOmni Framework by AI4WA + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Evaluation and Annotation Benchmark

+

As mentioned before, we evaluate the performance of the pipeline from two perspectives.

+
    +
  • Latency
  • +
  • Accuracy
  • +
+

Latency

+

For the latency part, if you log the time points and duration of each task within the Agent module, you automatically get the latency of each round of conversation. The results are presented in two ways:

+
    +
  • Summary Latency Benchmark
  • +
  • Detailed Latency Benchmark
  • +
+

The above figure is the Detailed Latency Benchmark, which will show the latency of each round of conversation.

+

The below figure is the Summary Latency Benchmark, which will show the summary statistics of the latency.

+

summary_latency

+

Accuracy

+

For the accuracy part, some metrics can be calculated automatically, such as WER for Speech2Text. However, most metrics currently need human annotation, and research on how to automate this is still ongoing and worth investigating.

+

So to solve the problem, we build the annotation functionality for the accuracy part.

+

Annotation System

+

Conversation Annotation

+

We have a table (a Model, in Django terms) called conversation to record each round of conversation.

+

The conversation will be associated with the input

+
    +
  • Data Audio
  • +
  • Data Video (associated with the Data Audio)
  • +
  • Data Text (Converted by Speech2Text Task)
  • +
+

and the output

+
    +
  • Response Text (Generated by the Agent module, Optional)
  • +
Generated Audio (generated by the Text2Speech module or produced directly, optional)
  • +
+

And the annotation will be based on the input and output.

+

As shown in this figure:

+

Annotation

+

Initially, the built-in evaluation metric is a score in [0, 5], where 5 means the response is perfect and 0 means the response is totally wrong. In this way, we can calculate a quantitative score for the performance of each component within the pipeline.

+

So for each conversation, you can annotate

+
    +
  • Speech2Text Score: Whether it is perfect, excellent, good, fair, poor, or bad
  • +
  • Give Speech2Text correct text
  • +
  • Text Generation Score: Same as the score above, evaluate the response text
  • +
  • Give Text Generation proper text
  • +
  • Text2Speech Score: Same as the score above, evaluate the generated audio
  • +
  • Overall Score: Overall score for the conversation
  • +
  • Comments: Any comments you want to add, which will be also shown in the benchmark page
  • +
+

One conversation can be annotated by multiple people, and the final score will be the average of all the scores.

+

The below figure shows the default annotation score:

+

Annotation_metrics

+

And the overview of the annotation for a conversation:

+

Annotation_overview

+

The annotation details are shown at the bottom.

+

Customised Component Annotation

+

For a specific component within the pipeline that does not fit into the conversation table above, we have a separate table to record the annotation. For example, emotion detection is a customised task we defined and developed, so it has its own annotation table.

+

Compared to the above setup, the context part (input and output) is the same, but the annotation metrics are different:

+

Annotation_metrics_emotion

+

Multi-turn Conversation Annotation

+

Conversations are actually multi-turn, which means we also want to be able to annotate multi-turn conversations.

+

This is also supported.

+

You can assign a "Tag" to a group of conversations; the last conversation within the group will then have an extra annotation field called "Multi turn annotation overall score and comment".

+

multi_turn

+

During and after the annotation process, you can track the progress on the Accuracy|Detail page. For example:

+

annotation_progress

+

After all annotation is done, you can view the accuracy summary on the Accuracy|Benchmark page.

+

accuracy_overall

+

And your multi-turn conversation results can be checked on the Accuracy|Multi-Turn Conversation page.

+

multi_turn_overall

+

In summary, for the evaluation benchmark, latency can be calculated automatically, while accuracy needs human annotation. Our tool can help advance automatic or semi-automatic accuracy metrics by collecting this kind of data.

+ + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/Tutorial/case_study/index.html b/Tutorial/case_study/index.html new file mode 100644 index 00000000..523f3769 --- /dev/null +++ b/Tutorial/case_study/index.html @@ -0,0 +1,4733 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Case Study - OpenOmni Framework by AI4WA + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Case Study

+

After the system is set up, we tested the pipeline with two cases:

+
    +
  • US Election Debate:
      +
In this case, we download the video and then sample it into segments, which are used as the video and audio input for the pipeline.
    • +
This approach helps address one of the problems in multimodal conversational Agent research: the lack of datasets.
    • +
    +
  • +
  • Assist the visually impaired:
      +
This is a real-world scenario, where we use the pipeline to assist visually impaired people when they are indoors.
    • +
It is not latency-critical, so it can potentially be applied even with the current latency, because the Agent accuracy is already quite high.
    • +
    +
  • +
+

US Election Debate

+

One of the most intensive conversational scenarios is a debate. We extracted segments from the 2024 US Presidential Debate between Biden and Trump, focusing on Biden addressing the public and handling questions. These segments were fed into our pipeline to evaluate its performance under different configurations:

+
  • OpenAI's Whisper for speech-to-text, the GPT-4o vision model, and text-to-speech (GPT4O_ETE);
  • a locally deployed quantization LLM with Whisper, text-to-speech, and our emotion detection model for video (QuantizationLLM_ETE);
  • replacing the quantization LLM with a Hugging Face LLM for inference (HF_ETE);
  • and a version using only Whisper, GPT-3.5, and text-to-speech, ignoring the video modality (GPT35_ETE).

We ran the Agent modules on an NVIDIA 3080 GPU with 12 GB of memory.

+

To replicate what we are doing, you can download the video from the link.

+

Put it under the folder Client/Listener/data/mock/US-Election-2024.mp4.

+

Then you can run the following command:

+
cd ./Client/Listener
source venv/bin/activate
python3 -m mock.data_extraction --input_video_path your/repo/path/US-Election-2024.mp4 --api_domain https://openomni.ai4wa.com --token your_token --time_points 02:53,3:20,20:20,20:39,33:38,34:18,55:15,55:40,80:05,80:18
+
+

You also need to have the rest of the modules running so that the pipeline can run properly.

+

All the pipelines we have set up will then be fired; all you need to do is wait for the results.

+

Results

+

gpt-4o-assist-latency
gpt-4o

+

After annotation, the accuracy performance is shown below:

+

accuracy_gpt4o

+

The fastest configuration is GPT35_ETE, averaging around 15 seconds, with most of the time consumed by the text-to-speech part, as the generated content is quite long and comprehensive. The slowest configuration is HF_ETE, taking around 189 seconds, with the LLM model inference step taking the longest time. QuantizationLLM_ETE takes around 60 seconds, with LLM model inference taking around 28 seconds and our emotion detection model taking around 10 seconds.

+

After annotation with our provided interface, the accuracy statistics are automatically generated. As shown in the figure above, speech-to-text accuracy is good, while text-to-speech could be improved with more natural emotion or personality. The text generation, however, is often too general and sometimes inappropriate. Biden's responses are more in-context and supported by evidence. The only question where our pipeline performed well was the subjective question about Biden's age, where the GPT-4o pipeline excelled.

+

The GPT35_ETE pipeline had the best overall accuracy, but its responses were often in-context yet pompous. Thus, Biden still outperforms AI.

+

In conclusion, AI cannot be the President of the US for now, from both latency and accuracy perspectives.

+

Help visually impaired people indoors

+

While latency and the need for external information currently prevent AI from being the President of the US, the current state of conversational Agents can be production-ready and useful in areas that are not latency-critical and do not require extensive external knowledge. Assisting indoor activities for the visually impaired is one such potential application area.

+

We set up a camera, microphone, and speaker, and then prepared several types of questions useful for the visually impaired population. These included questions about the location of specific objects and how to grab them, navigating to another position indoors, and inquiries about the surrounding environment. We sampled six questions and fed them to the GPT4O_ETE pipeline. The latency statistics show that each conversational request from the user is responded to within approximately 30 seconds, which is expected under the hardware setup.

+

After annotation with context, the accuracy performance is impressive, with an overall score of 4.7/5. Most responses are accurate; however, the LLM lacks specific skills for assisting the visually impaired. For example, when asked where the keyboard is, the response could include more instructive steps on how to grab it rather than a general description. This indicates that while the conversational Agent is nearly production-ready for assisting the visually impaired with indoor activities, improvements in latency and response content are still needed.


Pipeline customisation

+

Explanation

+

We have provided a list of built-in pipelines for demonstration and evaluation purposes.

+

For example, we have:

+
  • CLUSTER_Q_ETE_CONVERSATION:
    • Speech2Text with local Whisper
    • Emotion Detection
    • Quantization Local LLM
    • Text2Speech
  • CLUSTER_Q_NO_EMOTION_ETE_CONVERSATION:
    • Speech2Text with local Whisper
    • Quantization Local LLM
    • Text2Speech
  • CLUSTER_HF_ETE_CONVERSATION:
    • Speech2Text with local Whisper
    • Emotion Detection
    • HuggingFace Local LLM
    • Text2Speech
  • CLUSTER_GPT_4O_ETE_CONVERSATION:
    • Speech2Text with the OpenAI API
    • GPT-4o with image and text
    • Text2Speech
  • CLUSTER_GPT_4O_TEXT_ETE_CONVERSATION:
    • Speech2Text with the OpenAI API
    • GPT-4o with text only
    • Text2Speech
  • CLUSTER_GPT_35_ETE_CONVERSATION:
    • Speech2Text with the OpenAI API
    • GPT-3.5 with text
    • Text2Speech
  • CLUSTER_GPT_35_RAG_ETE_CONVERSATION:
    • Speech2Text with the OpenAI API
    • GPT-3.5 with text
    • RAG
    • Text2Speech

After the evaluation, we found that, for all of these pipelines running on an NVIDIA 3080 GPU, none of the latencies is acceptable. The best performer is the GPT-3.5 pipeline with text-only input, which has a latency of around 8-10 seconds. For GPT-4o, the API latency is around 3-8 seconds; when you feed in more image data, the latency increases significantly.

+

So if you have an idea and a solution that you want to test for acceptability, how should you do that?

+

First, go to the code file API/orchestrator/chain/clusters.py.

+

This is the place where we put all the pipeline configurations, as shown above.

+

Here is an example of the pipeline configuration:

+

"""
Cluster for gpt3.5 model and gpt3.5 with RAG
"""
CLUSTER_GPT_35_RAG_ETE_CONVERSATION_NAME = "CLUSTER_GPT_35_RAG_ETE_CONVERSATION"
CLUSTER_GPT_35_RAG_ETE_CONVERSATION = {
    "openai_speech2text": {
        "order": 0,
        "extra_params": {},
        "component_type": "task",
        "task_name": "openai_speech2text",
    },
    "completed_openai_speech2text": {
        "order": 1,
        "extra_params": {},
        "component_type": "signal",
        "task_name": None,
    },
    "created_data_text": {
        "order": 2,
        "extra_params": {},
        "component_type": "signal",
        "task_name": None,
    },
    "completed_rag": {
        "order": 3,
        "extra_params": {},
        "component_type": "task",
        "task_name": "rag",
    },
    "completed_openai_gpt_35": {
        "order": 4,
        "extra_params": {
            "prompt_template": """{text}""",
        },
        "component_type": "task",
        "task_name": "openai_gpt_35",
    },
    "completed_openai_text2speech": {
        "order": 5,
        "extra_params": {},
        "component_type": "task",
        "task_name": "openai_text2speech",
    },
}
+
+

First, we need to define a cluster name, which identifies the pipeline. This is the cluster name you specify via --track_cluster when you start the audio acquisition.

+

When you stop talking, the audio acquisition sends the audio you spoke to the API with a track_id, which is in the format T-{cluster_name}-{uid_for_this_conversation}, for example: T-CLUSTER_GPT_35_RAG_ETE_CONVERSATION-f6bf3b78e4f5484abf949790c8451856.
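
As a small illustration (assuming only that the track_id follows the format above and that the uid is any unique hex string), constructing a track_id on the client side and recovering the cluster name on the API side look roughly like this:

import uuid

cluster_name = "CLUSTER_GPT_35_RAG_ETE_CONVERSATION"
track_id = f"T-{cluster_name}-{uuid.uuid4().hex}"
# e.g. "T-CLUSTER_GPT_35_RAG_ETE_CONVERSATION-f6bf3b78e4f5484abf949790c8451856"

# On the API side, the cluster name is recovered from the track_id
# (this mirrors ClusterManager.chain_next shown further below):
recovered_cluster_name = track_id.split("-")[1]
assert recovered_cluster_name == cluster_name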

+

The API side uses the cluster_name to trigger the relevant pipeline and tasks, and all downstream tasks for this conversation within the pipeline are grouped by this track_id to ensure pipeline observability.

+

We have a table called Task to manage all the different types of tasks; this could be decomposed into a queue system if we wanted to bring it into production with a more complex design. Currently, to keep the design simple and flexible, every Agent task is recorded in the Task table, and we rely on this table to analyse the progress of the pipeline and the health of the system.

+

tasks

+

For example, with the track_id above, the pipeline configured earlier will be triggered.

+

First, it creates a task whose name is openai_speech2text and whose status is pending, with the proper parameters.

+

The Agent consumer consumes this task and, once the task is done, updates the task record with the status completed. The metadata generated while the Agent module runs is saved in the result_json field, under two primary keys:

+
  • result_profile: stores the results we expect from this task, such as the generated text
  • latency_profile: stores the timestamps of critical time points and the durations of both model inference and data transfer (see the sketch below)
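
As a purely illustrative sketch of the shape of result_json (the two top-level keys come from the description above; the field names inside them are hypothetical and will vary per task):

result_json = {
    "result_profile": {
        # task outputs, e.g. the generated text for a speech2text task (hypothetical field name)
        "text": "Good evening, everyone ...",
    },
    "latency_profile": {
        # critical time points and durations (hypothetical field names)
        "model_inference_duration": 2.31,
        "data_transfer_duration": 0.42,
    },
}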

It will be like this:

+

task

+

When the Agent module calls the API endpoint to update the task status, it triggers a completed_task Signal (check Django Signals for further details), which acts as the router that dispatches the subsequent tasks.

+

The specific code that implements this is in API/orchestrator/models.py, lines 114-119:

+
def save(self, *args, **kwargs):
    # if it is updated, then we need to call the chain
    if self.result_status == "completed":
        completed_task.send(sender=self, data=self.__dict__)
    super().save(*args, **kwargs)
+
+

This overrides the Django model save function for Task: when result_status changes to completed, it triggers the completed_task signal.

+

The completed_task signal is defined in API/orchestrator/chain/signals.py:

+
from django.dispatch import Signal

completed_task = Signal()  # task itself
+
+

The receiver of this signal is defined in API/orchestrator/chain/completed_task.py:

+
from django.dispatch import receiver

from authenticate.utils.get_logger import get_logger
from orchestrator.chain.models import TaskData
from orchestrator.chain.signals import (
    completed_emotion_detection,
    completed_hf_llm,
    completed_openai_gpt_4o_text_and_image,
    completed_openai_gpt_4o_text_only,
    completed_openai_gpt_35,
    completed_openai_speech2text,
    completed_openai_text2speech,
    completed_quantization_llm,
    completed_rag,
    completed_speech2text,
    completed_task,
    completed_text2speech,
)
from orchestrator.models import Task

logger = get_logger(__name__)


@receiver(completed_task)
def trigger_completed_task(sender, **kwargs):
    """
    Trigger the multi-modal emotion detection.
    """
    data = kwargs.get("data", {})
    task_data = TaskData(**data)

    if task_data.task_name == "speech2text":
        return completed_speech2text.send(
            sender=sender, data=data, track_id=task_data.track_id
        )

    if task_data.task_name == "emotion_detection":
        return completed_emotion_detection.send(
            sender=sender, data=data, track_id=task_data.track_id
        )

    if task_data.task_name == "quantization_llm":
        return completed_quantization_llm.send(
            sender=sender, data=data, track_id=task_data.track_id
        )

    if task_data.task_name == "text2speech":
        logger.info("Text2Speech task completed")
        return completed_text2speech.send(
            sender=sender, data=data, track_id=task_data.track_id
        )

    if task_data.task_name == "hf_llm":
        logger.info("HF LLM task completed")
        return completed_hf_llm.send(
            sender=sender, data=data, track_id=task_data.track_id
        )

    if task_data.task_name == "openai_speech2text":
        logger.info("OpenAI Speech2Text task completed")
        return completed_openai_speech2text.send(
            sender=sender, data=data, track_id=task_data.track_id
        )

    if task_data.task_name == "openai_gpt_4o_text_and_image":
        logger.info("OpenAI GPT4O task completed")
        return completed_openai_gpt_4o_text_and_image.send(
            sender=sender, data=data, track_id=task_data.track_id
        )

    if task_data.task_name == "openai_gpt_35":
        logger.info("OpenAI GPT3.5 task completed")
        return completed_openai_gpt_35.send(
            sender=sender, data=data, track_id=task_data.track_id
        )

    if task_data.task_name == "openai_gpt_4o_text_only":
        logger.info("OpenAI GPT4O Text Only task completed")
        return completed_openai_gpt_4o_text_only.send(
            sender=sender, data=data, track_id=task_data.track_id
        )

    if task_data.task_name == "rag":
        logger.info("RAG task completed")
        return completed_rag.send(sender=sender, data=data, track_id=task_data.track_id)

    if task_data.task_name == "openai_text2speech":
        logger.info("OpenAI Text2Speech task completed")
        return completed_openai_text2speech.send(
            sender=sender, data=data, track_id=task_data.track_id
        )

    task_name_choices = Task.get_task_name_choices()
    task_name_choices_list = [task[0] for task in task_name_choices]
    if task_data.task_name not in task_name_choices_list:
        logger.error("Task name not found is not in the choices list")
        return
    logger.critical(f"{task_data.task_name} task completed, however, no action taken.")
+
+

We can see from the code that it uses the track_id to find the cluster name, and then, based on this cluster's configuration, identifies the next component within the cluster (pipeline).

+

For example, the steps will be like:

+
  • When the openai_speech2text task is finished,
  • completed_task is triggered.
  • Based on the current task name, it triggers the downstream Signal, in this case completed_openai_speech2text.
  • The completed_openai_speech2text receiver lives in API/orchestrator/chain/completed_openai_speech2text.py; it processes the results into a DataText object and saves it to the database.
  • It then identifies the current cluster based on the track_id, and the next component within the pipeline based on the current task name, which is created_data_text. The ClusterManager class is in charge of this.
  • If the next component is a signal component, the signal is dispatched, and its receiver takes the input and performs the next step.
  • If it is a task component, the next task is created with the extra_params added to its parameters and saved to the database; the Agent module listens for it and consumes it.
  • The process repeats like this until it reaches the end of the pipeline.

ClusterManager code is in API/orchestrator/chain/manager.py

+
"""

Here will define a list of clusters

Each cluster will have a list of chain components

For example, end-to-end conversation chain will have the following components:

- completed_speech2text
- created_data_text
- completed_emotion_detection
- completed_quantization_llm
- completed_text2speech
"""

from typing import Optional, Tuple

from authenticate.utils.get_logger import get_logger
from orchestrator.chain.clusters import CLUSTERS
from orchestrator.chain.signals import created_data_text
from orchestrator.models import Task

logger = get_logger(__name__)


class ClusterManager:

    @staticmethod
    def get_cluster(cluster_name: str):
        """
        Get the cluster

        Args:
            cluster_name (str): The cluster name
        """
        if cluster_name in CLUSTERS:
            return CLUSTERS[cluster_name]
        return None

    @staticmethod
    def get_next_chain_component(
        cluster: dict, current_component: str
    ) -> Tuple[Optional[str], Optional[dict]]:
        """
        Get the next chain

        Args:
            cluster (dict): The cluster
            current_component (str): The current component

        Return:
            Tuple[Optional[str], Optional[dict]]: The next component and its parameters if exists, otherwise None
        """
        chain = []
        for key, value in cluster.items():
            chain.append(key)
        chain.sort(key=lambda x: cluster[x]["order"])
        if current_component == "init":
            """
            If this is the start of the chain, then return the first component
            """
            return chain[0], cluster[chain[0]]
        # index of the current component
        current_component_index = chain.index(current_component)
        next_index = current_component_index + 1
        if next_index >= len(chain):
            return None, None
        return chain[next_index], cluster[chain[next_index]]

    @classmethod
    def get_next(cls, cluster_name: str, current_component: str):
        """
        Get the next component

        Args:
            cluster_name (str): The cluster name
            current_component (str): The current component
        """
        cluster = cls.get_cluster(cluster_name)
        if cluster is None:
            return None
        return ClusterManager.get_next_chain_component(cluster, current_component)

    @classmethod
    def chain_next(
        cls,
        track_id: Optional[str],
        current_component: str,
        next_component_params: dict,
        name: str = None,
        user=None,
    ):
        """
        Chain to the next component

        Args:
            current_component (str): The current component
            track_id (str): The track ID
            next_component_params (dict): The next component parameters
            name (str): The task name, it will be used to aggregate the task
            user (None): The user
        """
        logger.info(f"Current component: {current_component}")
        logger.info(f"Next component params: {next_component_params}")
        cluster_name = track_id.split("-")[1]
        next_component_name, next_component = cls.get_next(
            cluster_name, current_component
        )
        logger.info(f"Next component: {next_component_name}")

        if next_component_name is None:
            return
        # do something with the next component
        # It can be a task or a signal
        next_parameters = {
            **next_component_params,
            **next_component.get("extra_params", {}),
        }
        logger.info(next_parameters)
        logger.info(next_component_name)

        if next_component["component_type"] == "task":
            task = Task.create_task(
                user=user,
                name=name or next_component["task_name"],
                task_name=next_component["task_name"],
                parameters=next_parameters,
                track_id=track_id,
            )
            logger.info(f"Task {task.id} created for {next_component['task_name']}")
            return task.id
        elif next_component["component_type"] == "signal":
            if next_component_name == "created_data_text":
                created_data_text.send(
                    sender=next_component_params.get("sender"),
                    data=next_component_params.get("data"),
                    track_id=track_id,
                    user=user,
                )
        return None
+
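
For instance, with the CLUSTER_GPT_35_RAG_ETE_CONVERSATION configuration shown earlier, walking the chain looks like this (a sketch; it assumes a Django shell where the orchestrator app is importable):

from orchestrator.chain.manager import ClusterManager

track_id = "T-CLUSTER_GPT_35_RAG_ETE_CONVERSATION-f6bf3b78e4f5484abf949790c8451856"
cluster_name = track_id.split("-")[1]

# the very first component of the pipeline
name, config = ClusterManager.get_next(cluster_name, "init")
print(name)  # openai_speech2text

# the component that follows the RAG step
name, config = ClusterManager.get_next(cluster_name, "completed_rag")
print(name)  # completed_openai_gpt_35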
+
+

Demonstration

+

API end

+

So if you want to customise the pipeline, you can add your own cluster configuration in API/orchestrator/chain/clusters.py.

+

For example, suppose we want to add a cluster called CLUSTER_VOICE_ETE_CONVERSATION, which first converts the image into a description text and then feeds it, together with the audio, to an end-to-end voice model to generate the output audio:

+
  • First it will do the image2text task
  • Then it will trigger the voice2voice model

The configuration will be like this:

+
CLUSTER_VOICE_ETE_CONVERSATION_NAME = "CLUSTER_VOICE_ETE_CONVERSATION"

CLUSTER_VOICE_ETE_CONVERSATION = {
    "image2text": {
        "order": 0,
        "extra_params": {},
        "component_type": "task",
        "task_name": "image2text",
    },
    "completed_image2text": {
        "order": 1,
        "extra_params": {},
        "component_type": "signal",
        "task_name": None,
    },
    "completed_voice2voice": {
        "order": 2,
        "extra_params": {},
        "component_type": "task",
        "task_name": "voice2voice",
    },
}

CLUSTERS = {
    # ...
    CLUSTER_VOICE_ETE_CONVERSATION_NAME: CLUSTER_VOICE_ETE_CONVERSATION,
}
+
+

Then add the newly added task names to task_name_choices in API/orchestrator/models.py:

+

@staticmethod
def get_task_name_choices():
    """
    Get dynamic task name choices
    Returns:
        list: List of tuples containing task name choices
    """
    # Here you can fetch the choices from an external source or database
    return [
        #  ...
        ("rag", "RAG"),
        ("image2text", "Image2Text"),
        ("voice2voice", "Voice2Voice"),
    ]


@staticmethod
def task_ml_task_mapping() -> dict:
    return {
        # ...
        "rag": "rag",
        "image2text": "image2text",
        "voice2voice": "voice2voice",
    }
+

This makes sure you can choose the two newly added task types when creating a new Task.

+

Next, you will need to create two new Signals:

+
  • completed_image2text
  • completed_voice2voice

in API/orchestrator/chain/signals.py

+

from django.dispatch import Signal

completed_task = Signal()  # task itself
# ....
completed_image2text = Signal()
completed_voice2voice = Signal()
+
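
One extra step that is easy to miss: the trigger_completed_task router shown earlier dispatches by task_name, so the two new task types will likely also need their own branches there, mirroring the existing ones (a sketch to add inside trigger_completed_task):

    if task_data.task_name == "image2text":
        logger.info("Image2Text task completed")
        return completed_image2text.send(
            sender=sender, data=data, track_id=task_data.track_id
        )

    if task_data.task_name == "voice2voice":
        logger.info("Voice2Voice task completed")
        return completed_voice2voice.send(
            sender=sender, data=data, track_id=task_data.track_id
        )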
+

Then create the two receivers that handle the two signals, in API/orchestrator/chain/completed_image2text.py and API/orchestrator/chain/completed_voice2voice.py.

+

Use the existing receivers as references to implement the new ones; a rough sketch follows.
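
As a rough sketch of such a receiver (how the result is pulled out of result_json here is an assumption for illustration only; copy the exact result handling from an existing receiver such as completed_openai_speech2text):

# API/orchestrator/chain/completed_image2text.py (sketch)
from django.dispatch import receiver

from authenticate.utils.get_logger import get_logger
from orchestrator.chain.manager import ClusterManager
from orchestrator.chain.models import TaskData
from orchestrator.chain.signals import completed_image2text

logger = get_logger(__name__)


@receiver(completed_image2text)
def trigger_completed_image2text(sender, **kwargs):
    """Hand the image2text result over to the next component in the cluster."""
    data = kwargs.get("data", {})
    track_id = kwargs.get("track_id")
    task_data = TaskData(**data)
    logger.info(f"Image2Text completed for track {track_id}")
    # NOTE: the keys inside result_json are assumptions for illustration only
    description_text = (task_data.result_json or {}).get("result_profile", {}).get("text", "")
    ClusterManager.chain_next(
        track_id=track_id,
        current_component="completed_image2text",
        next_component_params={"text": description_text},
    )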

+

Then you need to register the two receivers in orchestrator/chain/apps.py:

+
def ready(self):  # noqa
    # Import signals
    # ....
    import orchestrator.chain.completed_image2text  # noqa
    import orchestrator.chain.completed_voice2voice  # noqa
+
+

At this point, the API end is done for the newly added pipeline.

+

Agent end

+

You will also need to implement the Agent modules that consume the newly added pipeline, mainly the newly added task types.

+

We have added two types of tasks, which means we need to add two modules to handle them.

+

So create an image2text module in Agent/modules/image2text/__init__.py and a voice2voice module in Agent/modules/voice2voice/__init__.py.

+

You can then implement the code as you like within each respective folder, using the other modules as references; a rough skeleton is sketched below.
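
For orientation only, an image2text module might be shaped roughly like this; the class name, method name, and parameter keys below are hypothetical, so copy the real interface from an existing module such as Agent/modules/openai/handler.py:

# Agent/modules/image2text/__init__.py (illustrative skeleton only)
class Image2TextHandler:
    """Hypothetical handler skeleton; mirror the structure of an existing Agent module."""

    def __init__(self, **kwargs):
        # load or initialise your captioning model here
        self.model = None

    def handle_task(self, task):
        """Consume one task pulled from the API and return the content for result_json."""
        image_path = task.parameters.get("image_path")  # hypothetical parameter name
        description = "an image description"  # replace with real model inference
        return {"result_profile": {"text": description}}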

+

After that, you will need to register both tasks in main.py.

+

It will not be hard to add them if you follow what we have done for the other tasks.

+

Test it out

+

Then go to the client end, start the audio acquisition, and set the cluster name to the newly added one.

+

Then start talking; you will see the pipeline being triggered and the Agent module consuming the tasks.

+

If anything goes wrong, use the explanation above to debug the problem.


Set up and run the pipeline successfully

+

The deployment mode will be All in One Local Machine for demonstration purposes. This means all of the components will run on your local machine or PC. To get started, you will need a reasonably powerful machine (as we will run some local LLMs) with a camera, microphone, and speaker, which most laptops have.

+

You will also need Python and Docker installed on your machine.

+

Step 1: Clone the repository

+
# switch to a proper directory
git clone git@github.com:AI4WA/OpenOmniFramework.git
+
+

Step 2: Get the API running

+
cd ./OpenOmniFramework
cd ./API
# Run it inside docker, this is the easiest way to get started
docker compose up
+
+

After this, you should be able to access the API at http://localhost:8000. The default username/password is admin/password.

+

Step 3: Grab the Token for Authentication

+

Log in to the API admin, go to http://localhost:8000/authtoken/tokenproxy/, and click Add Token.

+

Add Token
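
If you want to sanity-check the token from the command line, a quick request with the DRF token header should work; the endpoint path below is only a placeholder, so point it at any authenticated endpoint of your deployment:

import requests

# hypothetical endpoint path; substitute one that exists in your API deployment
resp = requests.get(
    "http://localhost:8000/hardware/",
    headers={"Authorization": "Token your_token_from_step_3"},
)
print(resp.status_code)  # expect 200 if the token is valid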

+

Step 4: Collect Audio and Video Data

+
cd ./OpenOmniFramework
cd ./Client/Listener

# create the virtual environment if this is your first time running this
python3 -m venv venv
source venv/bin/activate
pip3 install -r requirements.txt
pip3 install -r requirements.dev.txt # if you are doing further development

# run video acquire
python3 videos_acquire.py --token your_token_from_step_3
+

You should be able to see something like this:
video_cli

+

Then open a new terminal

+
cd ./OpenOmniFramework
cd ./Client/Listener

# create the virtual environment if this is your first time running this
python3 -m venv venv
source venv/bin/activate
pip3 install -r requirements.txt
pip3 install -r requirements.dev.txt # if you are doing further development

# run audio acquire
python3 audios_acquire.py --token your_token_from_step_3 --track_cluster CLUSTER_GPT_4O_ETE_CONVERSATION
# you can change the cluster to the one you need
+
+

You will see something like this:
audio_cli

+

If everything works, you should be able to check the newly created Data Audios, Data Videos, and Speech2Text Tasks in the API Admin page, something like below:
tasks
audio
video

+

Step 5: Run the Agent models
Now we need to start the Agent module to consume the Tasks.

+
cd ./OpenOmniFramework
cd ./Agent

python3 -m venv venv
source venv/bin/activate
pip3 install -r requirements.txt
pip3 install -r requirements.dev.txt # if you are doing further development
+

Before we start the Agent module, there is some pre-configuration we need to do.

+

The functionalities provided within the Agent modules support OpenAI calls and HuggingFace calls, and there is also our own emotion detection module.

+

We need to get them set up first.

+

Set up the OpenAI and HuggingFace environment variables

+

Create a .env file in the ./Agent folder and add the following content:

+
HF_TOKEN=Your_HuggingFace_Token
OPENAI_API_KEY=Your_OpenAI_API_KEY
+
+

Alternatively, you can run:

+
export HF_TOKEN=Your_HuggingFace_Token
export OPENAI_API_KEY=Your_OpenAI_API_KEY
+
+

For the model part, if you want to get our emotion detection model running, you will need to download the model from the download link.

+

Put it in the folder ./Agent/data/models/emotion_detection/model_data. It should look like this:

+

emotion_model

+

Then you should be ready to run the Agent module.

+
# run the Agent module
python3 main.py --token your_token_from_step_3
+
+

You can also skip the requirements installation steps and run the Agent module directly with Docker:

+
TOKEN=XXX docker compose up
+
+

This will allow you to utilise the GPU resources on your machine if you have one.

+

ai_running

+

At this point, you have the client side feeding video/audio data to the API and the Agent module consuming that data.

+

Step 6: Play speech audio on the client side

+
cd ./OpenOmniFramework
cd ./Client/Responder

# create the virtual environment if this is your first time running this
python3 -m venv venv
source venv/bin/activate
pip3 install -r requirements.txt
pip3 install -r requirements.dev.txt # if you are doing further development

# run the audio player

python3 play_speech.py --token your_token_from_step_3
+
+

You will see something like this:

+

audio_play

+

By now, you should have the whole pipeline running on your local machine.

+

You should see new tasks created as expected on the Tasks page of the API admin, as shown below:

+

tasks

+

On the Detailed Latency Benchmark page, you should be able to see the latency of each round of conversation.

+

latency


Video Demo

+

Scripts

+

Note: here we will use the architecture diagram to explain the script.

+

Hello everyone, we are excited to introduce our latest work, a multimodal open-source conversational AI framework: the OpenOmni Framework.

+

Why we built this comes down to these points:

+
  • It is now feasible to build an end-to-end conversational AI system with the models and tools currently available.
  • However, anyone who wants to try it out still needs to spend quite a lot of time getting it to work from scratch.
  • And we do not know in advance whether the latency and accuracy will be acceptable.

So, to make sure people do not reinvent the wheel, we built this framework; for details, you can check our documentation.

+

What we will demo here is one of the useful scenarios for the framework: using conversational AI to help visually impaired people navigate indoors.

+

Note: here the video shows the three devices: one running the AI module, one running the API module, and one running the Client module.

+

We will use the local network deployment option, deploying the AI/API/Client modules on three different devices within the same network.

+

The audio and video data will be collected from this Raspberry Pi and then synced to the API server, together with the metadata.

+

Then the API end, based on the parameters, allocates the task to the AI module, which processes it: for example, speech2text, LLM generation, or text2speech.

+

All the computationally heavy work happens here.

+

When the results are ready, the data or generated audio is sent back to the API side.

+

The client side has another thread listening to the API side, which then plays the audio, completing the conversation.

+

Note: next is the demo.

+

OK, let's start the audio collection process; all the other modules are already running.

+

Hi, where is my cup of coffee, can you tell me how to grab it?

+

Note: then wait for the response and play the audio.

+

After this is finished, we move on to the research and benchmarking part.

+

Note: show the API interface here.

+

You will directly get the latency details and summary stats from our API interface.

+

We can see how long each module takes, how long the whole process takes in total, and which parts are model inference time versus data transfer time.

+

We can also annotate and benchmark the accuracy of the process: whether the response tone and content fit the scenario.

+

After the annotation, you will be able to see the details and summaries on this page.

+

This can be powerful for conversational AI research and application development; you can use it to evaluate different pipeline combinations.

+

Gathering datasets, etc.

+

Hopefully this can benefit the wider community, and we are looking forward to your feedback.

+

Procedure to start the demo

+
  • API: log in and run docker compose up for the API module; make sure to export STORAGE_SOLUTION=local
  • Client:
    • Log in to the Raspberry Pi and run ./start_pi.sh for the Listener
    • Log in to the Raspberry Pi and run ./start_pi.sh for the Responder
  • AI:
    • Run python3 storage.py xxx to sync the data
    • Run python3 main.py to start the AI module
C(()=>(document.head.appendChild(t),S(d(t,"load"),d(t,"error").pipe(v(()=>$r(()=>new ReferenceError(`Invalid script: ${e}`))))).pipe(m(()=>{}),L(()=>document.head.removeChild(t)),Te(1))))}var Go=new g,ya=C(()=>typeof ResizeObserver=="undefined"?wt("https://unpkg.com/resize-observer-polyfill"):I(void 0)).pipe(m(()=>new ResizeObserver(e=>e.forEach(t=>Go.next(t)))),v(e=>S(Ke,I(e)).pipe(L(()=>e.disconnect()))),G(1));function ce(e){return{width:e.offsetWidth,height:e.offsetHeight}}function ge(e){let t=e;for(;t.clientWidth===0&&t.parentElement;)t=t.parentElement;return ya.pipe(E(r=>r.observe(t)),v(r=>Go.pipe(b(o=>o.target===t),L(()=>r.unobserve(t)))),m(()=>ce(e)),Q(ce(e)))}function Tt(e){return{width:e.scrollWidth,height:e.scrollHeight}}function cr(e){let t=e.parentElement;for(;t&&(e.scrollWidth<=t.scrollWidth&&e.scrollHeight<=t.scrollHeight);)t=(e=t).parentElement;return t?e:void 0}function Jo(e){let t=[],r=e.parentElement;for(;r;)(e.clientWidth>r.clientWidth||e.clientHeight>r.clientHeight)&&t.push(r),r=(e=r).parentElement;return t.length===0&&t.push(document.documentElement),t}function Ue(e){return{x:e.offsetLeft,y:e.offsetTop}}function Xo(e){let t=e.getBoundingClientRect();return{x:t.x+window.scrollX,y:t.y+window.scrollY}}function Zo(e){return S(d(window,"load"),d(window,"resize")).pipe(Le(0,me),m(()=>Ue(e)),Q(Ue(e)))}function pr(e){return{x:e.scrollLeft,y:e.scrollTop}}function De(e){return S(d(e,"scroll"),d(window,"scroll"),d(window,"resize")).pipe(Le(0,me),m(()=>pr(e)),Q(pr(e)))}var en=new g,Ea=C(()=>I(new IntersectionObserver(e=>{for(let t of e)en.next(t)},{threshold:0}))).pipe(v(e=>S(Ke,I(e)).pipe(L(()=>e.disconnect()))),G(1));function tt(e){return Ea.pipe(E(t=>t.observe(e)),v(t=>en.pipe(b(({target:r})=>r===e),L(()=>t.unobserve(e)),m(({isIntersecting:r})=>r))))}function tn(e,t=16){return De(e).pipe(m(({y:r})=>{let o=ce(e),n=Tt(e);return r>=n.height-o.height-t}),K())}var lr={drawer:P("[data-md-toggle=drawer]"),search:P("[data-md-toggle=search]")};function rn(e){return lr[e].checked}function Je(e,t){lr[e].checked!==t&&lr[e].click()}function Ve(e){let t=lr[e];return d(t,"change").pipe(m(()=>t.checked),Q(t.checked))}function wa(e,t){switch(e.constructor){case HTMLInputElement:return e.type==="radio"?/^Arrow/.test(t):!0;case HTMLSelectElement:case HTMLTextAreaElement:return!0;default:return e.isContentEditable}}function Ta(){return S(d(window,"compositionstart").pipe(m(()=>!0)),d(window,"compositionend").pipe(m(()=>!1))).pipe(Q(!1))}function on(){let e=d(window,"keydown").pipe(b(t=>!(t.metaKey||t.ctrlKey)),m(t=>({mode:rn("search")?"search":"global",type:t.key,claim(){t.preventDefault(),t.stopPropagation()}})),b(({mode:t,type:r})=>{if(t==="global"){let o=Re();if(typeof o!="undefined")return!wa(o,r)}return!0}),pe());return Ta().pipe(v(t=>t?O:e))}function xe(){return new URL(location.href)}function pt(e,t=!1){if(B("navigation.instant")&&!t){let r=x("a",{href:e.href});document.body.appendChild(r),r.click(),r.remove()}else location.href=e.href}function nn(){return new g}function an(){return location.hash.slice(1)}function sn(e){let t=x("a",{href:e});t.addEventListener("click",r=>r.stopPropagation()),t.click()}function Sa(e){return S(d(window,"hashchange"),e).pipe(m(an),Q(an()),b(t=>t.length>0),G(1))}function cn(e){return Sa(e).pipe(m(t=>fe(`[id="${t}"]`)),b(t=>typeof t!="undefined"))}function $t(e){let t=matchMedia(e);return ar(r=>t.addListener(()=>r(t.matches))).pipe(Q(t.matches))}function pn(){let e=matchMedia("print");return 
S(d(window,"beforeprint").pipe(m(()=>!0)),d(window,"afterprint").pipe(m(()=>!1))).pipe(Q(e.matches))}function Nr(e,t){return e.pipe(v(r=>r?t():O))}function zr(e,t){return new F(r=>{let o=new XMLHttpRequest;return o.open("GET",`${e}`),o.responseType="blob",o.addEventListener("load",()=>{o.status>=200&&o.status<300?(r.next(o.response),r.complete()):r.error(new Error(o.statusText))}),o.addEventListener("error",()=>{r.error(new Error("Network error"))}),o.addEventListener("abort",()=>{r.complete()}),typeof(t==null?void 0:t.progress$)!="undefined"&&(o.addEventListener("progress",n=>{var i;if(n.lengthComputable)t.progress$.next(n.loaded/n.total*100);else{let a=(i=o.getResponseHeader("Content-Length"))!=null?i:0;t.progress$.next(n.loaded/+a*100)}}),t.progress$.next(5)),o.send(),()=>o.abort()})}function Ne(e,t){return zr(e,t).pipe(v(r=>r.text()),m(r=>JSON.parse(r)),G(1))}function ln(e,t){let r=new DOMParser;return zr(e,t).pipe(v(o=>o.text()),m(o=>r.parseFromString(o,"text/html")),G(1))}function mn(e,t){let r=new DOMParser;return zr(e,t).pipe(v(o=>o.text()),m(o=>r.parseFromString(o,"text/xml")),G(1))}function fn(){return{x:Math.max(0,scrollX),y:Math.max(0,scrollY)}}function un(){return S(d(window,"scroll",{passive:!0}),d(window,"resize",{passive:!0})).pipe(m(fn),Q(fn()))}function dn(){return{width:innerWidth,height:innerHeight}}function hn(){return d(window,"resize",{passive:!0}).pipe(m(dn),Q(dn()))}function bn(){return z([un(),hn()]).pipe(m(([e,t])=>({offset:e,size:t})),G(1))}function mr(e,{viewport$:t,header$:r}){let o=t.pipe(Z("size")),n=z([o,r]).pipe(m(()=>Ue(e)));return z([r,t,n]).pipe(m(([{height:i},{offset:a,size:s},{x:p,y:c}])=>({offset:{x:a.x-p,y:a.y-c+i},size:s})))}function Oa(e){return d(e,"message",t=>t.data)}function Ma(e){let t=new g;return t.subscribe(r=>e.postMessage(r)),t}function vn(e,t=new Worker(e)){let r=Oa(t),o=Ma(t),n=new g;n.subscribe(o);let i=o.pipe(X(),ne(!0));return n.pipe(X(),Pe(r.pipe(U(i))),pe())}var La=P("#__config"),St=JSON.parse(La.textContent);St.base=`${new URL(St.base,xe())}`;function ye(){return St}function B(e){return St.features.includes(e)}function Ee(e,t){return typeof t!="undefined"?St.translations[e].replace("#",t.toString()):St.translations[e]}function Se(e,t=document){return P(`[data-md-component=${e}]`,t)}function ae(e,t=document){return $(`[data-md-component=${e}]`,t)}function _a(e){let t=P(".md-typeset > :first-child",e);return d(t,"click",{once:!0}).pipe(m(()=>P(".md-typeset",e)),m(r=>({hash:__md_hash(r.innerHTML)})))}function gn(e){if(!B("announce.dismiss")||!e.childElementCount)return O;if(!e.hidden){let t=P(".md-typeset",e);__md_hash(t.innerHTML)===__md_get("__announce")&&(e.hidden=!0)}return C(()=>{let t=new g;return t.subscribe(({hash:r})=>{e.hidden=!0,__md_set("__announce",r)}),_a(e).pipe(E(r=>t.next(r)),L(()=>t.complete()),m(r=>R({ref:e},r)))})}function Aa(e,{target$:t}){return t.pipe(m(r=>({hidden:r!==e})))}function xn(e,t){let r=new g;return r.subscribe(({hidden:o})=>{e.hidden=o}),Aa(e,t).pipe(E(o=>r.next(o)),L(()=>r.complete()),m(o=>R({ref:e},o)))}function Pt(e,t){return t==="inline"?x("div",{class:"md-tooltip md-tooltip--inline",id:e,role:"tooltip"},x("div",{class:"md-tooltip__inner md-typeset"})):x("div",{class:"md-tooltip",id:e,role:"tooltip"},x("div",{class:"md-tooltip__inner md-typeset"}))}function yn(...e){return x("div",{class:"md-tooltip2",role:"tooltip"},x("div",{class:"md-tooltip2__inner md-typeset"},e))}function En(e,t){if(t=t?`${t}_annotation_${e}`:void 0,t){let r=t?`#${t}`:void 0;return 
x("aside",{class:"md-annotation",tabIndex:0},Pt(t),x("a",{href:r,class:"md-annotation__index",tabIndex:-1},x("span",{"data-md-annotation-id":e})))}else return x("aside",{class:"md-annotation",tabIndex:0},Pt(t),x("span",{class:"md-annotation__index",tabIndex:-1},x("span",{"data-md-annotation-id":e})))}function wn(e){return x("button",{class:"md-clipboard md-icon",title:Ee("clipboard.copy"),"data-clipboard-target":`#${e} > code`})}function qr(e,t){let r=t&2,o=t&1,n=Object.keys(e.terms).filter(p=>!e.terms[p]).reduce((p,c)=>[...p,x("del",null,c)," "],[]).slice(0,-1),i=ye(),a=new URL(e.location,i.base);B("search.highlight")&&a.searchParams.set("h",Object.entries(e.terms).filter(([,p])=>p).reduce((p,[c])=>`${p} ${c}`.trim(),""));let{tags:s}=ye();return x("a",{href:`${a}`,class:"md-search-result__link",tabIndex:-1},x("article",{class:"md-search-result__article md-typeset","data-md-score":e.score.toFixed(2)},r>0&&x("div",{class:"md-search-result__icon md-icon"}),r>0&&x("h1",null,e.title),r<=0&&x("h2",null,e.title),o>0&&e.text.length>0&&e.text,e.tags&&e.tags.map(p=>{let c=s?p in s?`md-tag-icon md-tag--${s[p]}`:"md-tag-icon":"";return x("span",{class:`md-tag ${c}`},p)}),o>0&&n.length>0&&x("p",{class:"md-search-result__terms"},Ee("search.result.term.missing"),": ",...n)))}function Tn(e){let t=e[0].score,r=[...e],o=ye(),n=r.findIndex(l=>!`${new URL(l.location,o.base)}`.includes("#")),[i]=r.splice(n,1),a=r.findIndex(l=>l.scoreqr(l,1)),...p.length?[x("details",{class:"md-search-result__more"},x("summary",{tabIndex:-1},x("div",null,p.length>0&&p.length===1?Ee("search.result.more.one"):Ee("search.result.more.other",p.length))),...p.map(l=>qr(l,1)))]:[]];return x("li",{class:"md-search-result__item"},c)}function Sn(e){return x("ul",{class:"md-source__facts"},Object.entries(e).map(([t,r])=>x("li",{class:`md-source__fact md-source__fact--${t}`},typeof r=="number"?sr(r):r)))}function Qr(e){let t=`tabbed-control tabbed-control--${e}`;return x("div",{class:t,hidden:!0},x("button",{class:"tabbed-button",tabIndex:-1,"aria-hidden":"true"}))}function On(e){return x("div",{class:"md-typeset__scrollwrap"},x("div",{class:"md-typeset__table"},e))}function Ca(e){var o;let t=ye(),r=new URL(`../${e.version}/`,t.base);return x("li",{class:"md-version__item"},x("a",{href:`${r}`,class:"md-version__link"},e.title,((o=t.version)==null?void 0:o.alias)&&e.aliases.length>0&&x("span",{class:"md-version__alias"},e.aliases[0])))}function Mn(e,t){var o;let r=ye();return e=e.filter(n=>{var i;return!((i=n.properties)!=null&&i.hidden)}),x("div",{class:"md-version"},x("button",{class:"md-version__current","aria-label":Ee("select.version")},t.title,((o=r.version)==null?void 0:o.alias)&&t.aliases.length>0&&x("span",{class:"md-version__alias"},t.aliases[0])),x("ul",{class:"md-version__list"},e.map(Ca)))}var Ha=0;function ka(e){let t=z([et(e),kt(e)]).pipe(m(([o,n])=>o||n),K()),r=C(()=>Jo(e)).pipe(oe(De),ct(1),m(()=>Xo(e)));return t.pipe(Ae(o=>o),v(()=>z([t,r])),m(([o,n])=>({active:o,offset:n})),pe())}function $a(e,t){let{content$:r,viewport$:o}=t,n=`__tooltip2_${Ha++}`;return C(()=>{let i=new g,a=new _r(!1);i.pipe(X(),ne(!1)).subscribe(a);let s=a.pipe(Ht(c=>Me(+!c*250,Hr)),K(),v(c=>c?r:O),E(c=>c.id=n),pe());z([i.pipe(m(({active:c})=>c)),s.pipe(v(c=>kt(c,250)),Q(!1))]).pipe(m(c=>c.some(l=>l))).subscribe(a);let p=a.pipe(b(c=>c),ee(s,o),m(([c,l,{size:f}])=>{let u=e.getBoundingClientRect(),h=u.width/2;if(l.role==="tooltip")return{x:h,y:8+u.height};if(u.y>=f.height/2){let{height:w}=ce(l);return{x:h,y:-16-w}}else 
return{x:h,y:16+u.height}}));return z([s,i,p]).subscribe(([c,{offset:l},f])=>{c.style.setProperty("--md-tooltip-host-x",`${l.x}px`),c.style.setProperty("--md-tooltip-host-y",`${l.y}px`),c.style.setProperty("--md-tooltip-x",`${f.x}px`),c.style.setProperty("--md-tooltip-y",`${f.y}px`),c.classList.toggle("md-tooltip2--top",f.y<0),c.classList.toggle("md-tooltip2--bottom",f.y>=0)}),a.pipe(b(c=>c),ee(s,(c,l)=>l),b(c=>c.role==="tooltip")).subscribe(c=>{let l=ce(P(":scope > *",c));c.style.setProperty("--md-tooltip-width",`${l.width}px`),c.style.setProperty("--md-tooltip-tail","0px")}),a.pipe(K(),be(me),ee(s)).subscribe(([c,l])=>{l.classList.toggle("md-tooltip2--active",c)}),z([a.pipe(b(c=>c)),s]).subscribe(([c,l])=>{l.role==="dialog"?(e.setAttribute("aria-controls",n),e.setAttribute("aria-haspopup","dialog")):e.setAttribute("aria-describedby",n)}),a.pipe(b(c=>!c)).subscribe(()=>{e.removeAttribute("aria-controls"),e.removeAttribute("aria-describedby"),e.removeAttribute("aria-haspopup")}),ka(e).pipe(E(c=>i.next(c)),L(()=>i.complete()),m(c=>R({ref:e},c)))})}function lt(e,{viewport$:t},r=document.body){return $a(e,{content$:new F(o=>{let n=e.title,i=yn(n);return o.next(i),e.removeAttribute("title"),r.append(i),()=>{i.remove(),e.setAttribute("title",n)}}),viewport$:t})}function Pa(e,t){let r=C(()=>z([Zo(e),De(t)])).pipe(m(([{x:o,y:n},i])=>{let{width:a,height:s}=ce(e);return{x:o-i.x+a/2,y:n-i.y+s/2}}));return et(e).pipe(v(o=>r.pipe(m(n=>({active:o,offset:n})),Te(+!o||1/0))))}function Ln(e,t,{target$:r}){let[o,n]=Array.from(e.children);return C(()=>{let i=new g,a=i.pipe(X(),ne(!0));return i.subscribe({next({offset:s}){e.style.setProperty("--md-tooltip-x",`${s.x}px`),e.style.setProperty("--md-tooltip-y",`${s.y}px`)},complete(){e.style.removeProperty("--md-tooltip-x"),e.style.removeProperty("--md-tooltip-y")}}),tt(e).pipe(U(a)).subscribe(s=>{e.toggleAttribute("data-md-visible",s)}),S(i.pipe(b(({active:s})=>s)),i.pipe(_e(250),b(({active:s})=>!s))).subscribe({next({active:s}){s?e.prepend(o):o.remove()},complete(){e.prepend(o)}}),i.pipe(Le(16,me)).subscribe(({active:s})=>{o.classList.toggle("md-tooltip--active",s)}),i.pipe(ct(125,me),b(()=>!!e.offsetParent),m(()=>e.offsetParent.getBoundingClientRect()),m(({x:s})=>s)).subscribe({next(s){s?e.style.setProperty("--md-tooltip-0",`${-s}px`):e.style.removeProperty("--md-tooltip-0")},complete(){e.style.removeProperty("--md-tooltip-0")}}),d(n,"click").pipe(U(a),b(s=>!(s.metaKey||s.ctrlKey))).subscribe(s=>{s.stopPropagation(),s.preventDefault()}),d(n,"mousedown").pipe(U(a),ee(i)).subscribe(([s,{active:p}])=>{var c;if(s.button!==0||s.metaKey||s.ctrlKey)s.preventDefault();else if(p){s.preventDefault();let l=e.parentElement.closest(".md-annotation");l instanceof HTMLElement?l.focus():(c=Re())==null||c.blur()}}),r.pipe(U(a),b(s=>s===o),Ge(125)).subscribe(()=>e.focus()),Pa(e,t).pipe(E(s=>i.next(s)),L(()=>i.complete()),m(s=>R({ref:e},s)))})}function Ra(e){return e.tagName==="CODE"?$(".c, .c1, .cm",e):[e]}function Ia(e){let t=[];for(let r of Ra(e)){let o=[],n=document.createNodeIterator(r,NodeFilter.SHOW_TEXT);for(let i=n.nextNode();i;i=n.nextNode())o.push(i);for(let i of o){let a;for(;a=/(\(\d+\))(!)?/.exec(i.textContent);){let[,s,p]=a;if(typeof p=="undefined"){let c=i.splitText(a.index);i=c.splitText(s.length),t.push(c)}else{i.textContent=s,t.push(i);break}}}}return t}function _n(e,t){t.append(...Array.from(e.childNodes))}function fr(e,t,{target$:r,print$:o}){let n=t.closest("[id]"),i=n==null?void 0:n.id,a=new Map;for(let s of 
Ia(t)){let[,p]=s.textContent.match(/\((\d+)\)/);fe(`:scope > li:nth-child(${p})`,e)&&(a.set(p,En(p,i)),s.replaceWith(a.get(p)))}return a.size===0?O:C(()=>{let s=new g,p=s.pipe(X(),ne(!0)),c=[];for(let[l,f]of a)c.push([P(".md-typeset",f),P(`:scope > li:nth-child(${l})`,e)]);return o.pipe(U(p)).subscribe(l=>{e.hidden=!l,e.classList.toggle("md-annotation-list",l);for(let[f,u]of c)l?_n(f,u):_n(u,f)}),S(...[...a].map(([,l])=>Ln(l,t,{target$:r}))).pipe(L(()=>s.complete()),pe())})}function An(e){if(e.nextElementSibling){let t=e.nextElementSibling;if(t.tagName==="OL")return t;if(t.tagName==="P"&&!t.children.length)return An(t)}}function Cn(e,t){return C(()=>{let r=An(e);return typeof r!="undefined"?fr(r,e,t):O})}var Hn=Vt(Yr());var Fa=0;function kn(e){if(e.nextElementSibling){let t=e.nextElementSibling;if(t.tagName==="OL")return t;if(t.tagName==="P"&&!t.children.length)return kn(t)}}function ja(e){return ge(e).pipe(m(({width:t})=>({scrollable:Tt(e).width>t})),Z("scrollable"))}function $n(e,t){let{matches:r}=matchMedia("(hover)"),o=C(()=>{let n=new g,i=n.pipe(Fr(1));n.subscribe(({scrollable:c})=>{c&&r?e.setAttribute("tabindex","0"):e.removeAttribute("tabindex")});let a=[];if(Hn.default.isSupported()&&(e.closest(".copy")||B("content.code.copy")&&!e.closest(".no-copy"))){let c=e.closest("pre");c.id=`__code_${Fa++}`;let l=wn(c.id);c.insertBefore(l,e),B("content.tooltips")&&a.push(lt(l,{viewport$}))}let s=e.closest(".highlight");if(s instanceof HTMLElement){let c=kn(s);if(typeof c!="undefined"&&(s.classList.contains("annotate")||B("content.code.annotate"))){let l=fr(c,e,t);a.push(ge(s).pipe(U(i),m(({width:f,height:u})=>f&&u),K(),v(f=>f?l:O)))}}return $(":scope > span[id]",e).length&&e.classList.add("md-code__content"),ja(e).pipe(E(c=>n.next(c)),L(()=>n.complete()),m(c=>R({ref:e},c)),Pe(...a))});return B("content.lazy")?tt(e).pipe(b(n=>n),Te(1),v(()=>o)):o}function Wa(e,{target$:t,print$:r}){let o=!0;return S(t.pipe(m(n=>n.closest("details:not([open])")),b(n=>e===n),m(()=>({action:"open",reveal:!0}))),r.pipe(b(n=>n||!o),E(()=>o=e.open),m(n=>({action:n?"open":"close"}))))}function Pn(e,t){return C(()=>{let r=new g;return r.subscribe(({action:o,reveal:n})=>{e.toggleAttribute("open",o==="open"),n&&e.scrollIntoView()}),Wa(e,t).pipe(E(o=>r.next(o)),L(()=>r.complete()),m(o=>R({ref:e},o)))})}var Rn=".node circle,.node ellipse,.node path,.node polygon,.node rect{fill:var(--md-mermaid-node-bg-color);stroke:var(--md-mermaid-node-fg-color)}marker{fill:var(--md-mermaid-edge-color)!important}.edgeLabel .label rect{fill:#0000}.label{color:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}.label foreignObject{line-height:normal;overflow:visible}.label div .edgeLabel{color:var(--md-mermaid-label-fg-color)}.edgeLabel,.edgeLabel rect,.label div .edgeLabel{background-color:var(--md-mermaid-label-bg-color)}.edgeLabel,.edgeLabel rect{fill:var(--md-mermaid-label-bg-color);color:var(--md-mermaid-edge-color)}.edgePath .path,.flowchart-link{stroke:var(--md-mermaid-edge-color);stroke-width:.05rem}.edgePath .arrowheadPath{fill:var(--md-mermaid-edge-color);stroke:none}.cluster rect{fill:var(--md-default-fg-color--lightest);stroke:var(--md-default-fg-color--lighter)}.cluster span{color:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}g #flowchart-circleEnd,g #flowchart-circleStart,g #flowchart-crossEnd,g #flowchart-crossStart,g #flowchart-pointEnd,g #flowchart-pointStart{stroke:none}g.classGroup line,g.classGroup 
rect{fill:var(--md-mermaid-node-bg-color);stroke:var(--md-mermaid-node-fg-color)}g.classGroup text{fill:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}.classLabel .box{fill:var(--md-mermaid-label-bg-color);background-color:var(--md-mermaid-label-bg-color);opacity:1}.classLabel .label{fill:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}.node .divider{stroke:var(--md-mermaid-node-fg-color)}.relation{stroke:var(--md-mermaid-edge-color)}.cardinality{fill:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}.cardinality text{fill:inherit!important}defs #classDiagram-compositionEnd,defs #classDiagram-compositionStart,defs #classDiagram-dependencyEnd,defs #classDiagram-dependencyStart,defs #classDiagram-extensionEnd,defs #classDiagram-extensionStart{fill:var(--md-mermaid-edge-color)!important;stroke:var(--md-mermaid-edge-color)!important}defs #classDiagram-aggregationEnd,defs #classDiagram-aggregationStart{fill:var(--md-mermaid-label-bg-color)!important;stroke:var(--md-mermaid-edge-color)!important}g.stateGroup rect{fill:var(--md-mermaid-node-bg-color);stroke:var(--md-mermaid-node-fg-color)}g.stateGroup .state-title{fill:var(--md-mermaid-label-fg-color)!important;font-family:var(--md-mermaid-font-family)}g.stateGroup .composit{fill:var(--md-mermaid-label-bg-color)}.nodeLabel,.nodeLabel p{color:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}a .nodeLabel{text-decoration:underline}.node circle.state-end,.node circle.state-start,.start-state{fill:var(--md-mermaid-edge-color);stroke:none}.end-state-inner,.end-state-outer{fill:var(--md-mermaid-edge-color)}.end-state-inner,.node circle.state-end{stroke:var(--md-mermaid-label-bg-color)}.transition{stroke:var(--md-mermaid-edge-color)}[id^=state-fork] rect,[id^=state-join] rect{fill:var(--md-mermaid-edge-color)!important;stroke:none!important}.statediagram-cluster.statediagram-cluster .inner{fill:var(--md-default-bg-color)}.statediagram-cluster rect{fill:var(--md-mermaid-node-bg-color);stroke:var(--md-mermaid-node-fg-color)}.statediagram-state rect.divider{fill:var(--md-default-fg-color--lightest);stroke:var(--md-default-fg-color--lighter)}defs #statediagram-barbEnd{stroke:var(--md-mermaid-edge-color)}.attributeBoxEven,.attributeBoxOdd{fill:var(--md-mermaid-node-bg-color);stroke:var(--md-mermaid-node-fg-color)}.entityBox{fill:var(--md-mermaid-label-bg-color);stroke:var(--md-mermaid-node-fg-color)}.entityLabel{fill:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}.relationshipLabelBox{fill:var(--md-mermaid-label-bg-color);fill-opacity:1;background-color:var(--md-mermaid-label-bg-color);opacity:1}.relationshipLabel{fill:var(--md-mermaid-label-fg-color)}.relationshipLine{stroke:var(--md-mermaid-edge-color)}defs #ONE_OR_MORE_END *,defs #ONE_OR_MORE_START *,defs #ONLY_ONE_END *,defs #ONLY_ONE_START *,defs #ZERO_OR_MORE_END *,defs #ZERO_OR_MORE_START *,defs #ZERO_OR_ONE_END *,defs #ZERO_OR_ONE_START *{stroke:var(--md-mermaid-edge-color)!important}defs #ZERO_OR_MORE_END circle,defs #ZERO_OR_MORE_START circle{fill:var(--md-mermaid-label-bg-color)}.actor{fill:var(--md-mermaid-sequence-actor-bg-color);stroke:var(--md-mermaid-sequence-actor-border-color)}text.actor>tspan{fill:var(--md-mermaid-sequence-actor-fg-color);font-family:var(--md-mermaid-font-family)}line{stroke:var(--md-mermaid-sequence-actor-line-color)}.actor-man circle,.actor-man 
line{fill:var(--md-mermaid-sequence-actorman-bg-color);stroke:var(--md-mermaid-sequence-actorman-line-color)}.messageLine0,.messageLine1{stroke:var(--md-mermaid-sequence-message-line-color)}.note{fill:var(--md-mermaid-sequence-note-bg-color);stroke:var(--md-mermaid-sequence-note-border-color)}.loopText,.loopText>tspan,.messageText,.noteText>tspan{stroke:none;font-family:var(--md-mermaid-font-family)!important}.messageText{fill:var(--md-mermaid-sequence-message-fg-color)}.loopText,.loopText>tspan{fill:var(--md-mermaid-sequence-loop-fg-color)}.noteText>tspan{fill:var(--md-mermaid-sequence-note-fg-color)}#arrowhead path{fill:var(--md-mermaid-sequence-message-line-color);stroke:none}.loopLine{fill:var(--md-mermaid-sequence-loop-bg-color);stroke:var(--md-mermaid-sequence-loop-border-color)}.labelBox{fill:var(--md-mermaid-sequence-label-bg-color);stroke:none}.labelText,.labelText>span{fill:var(--md-mermaid-sequence-label-fg-color);font-family:var(--md-mermaid-font-family)}.sequenceNumber{fill:var(--md-mermaid-sequence-number-fg-color)}rect.rect{fill:var(--md-mermaid-sequence-box-bg-color);stroke:none}rect.rect+text.text{fill:var(--md-mermaid-sequence-box-fg-color)}defs #sequencenumber{fill:var(--md-mermaid-sequence-number-bg-color)!important}";var Br,Da=0;function Va(){return typeof mermaid=="undefined"||mermaid instanceof Element?wt("https://unpkg.com/mermaid@10/dist/mermaid.min.js"):I(void 0)}function In(e){return e.classList.remove("mermaid"),Br||(Br=Va().pipe(E(()=>mermaid.initialize({startOnLoad:!1,themeCSS:Rn,sequence:{actorFontSize:"16px",messageFontSize:"16px",noteFontSize:"16px"}})),m(()=>{}),G(1))),Br.subscribe(()=>ao(this,null,function*(){e.classList.add("mermaid");let t=`__mermaid_${Da++}`,r=x("div",{class:"mermaid"}),o=e.textContent,{svg:n,fn:i}=yield mermaid.render(t,o),a=r.attachShadow({mode:"closed"});a.innerHTML=n,e.replaceWith(r),i==null||i(a)})),Br.pipe(m(()=>({ref:e})))}var Fn=x("table");function jn(e){return e.replaceWith(Fn),Fn.replaceWith(On(e)),I({ref:e})}function Na(e){let t=e.find(r=>r.checked)||e[0];return S(...e.map(r=>d(r,"change").pipe(m(()=>P(`label[for="${r.id}"]`))))).pipe(Q(P(`label[for="${t.id}"]`)),m(r=>({active:r})))}function Wn(e,{viewport$:t,target$:r}){let o=P(".tabbed-labels",e),n=$(":scope > input",e),i=Qr("prev");e.append(i);let a=Qr("next");return e.append(a),C(()=>{let s=new g,p=s.pipe(X(),ne(!0));z([s,ge(e),tt(e)]).pipe(U(p),Le(1,me)).subscribe({next([{active:c},l]){let f=Ue(c),{width:u}=ce(c);e.style.setProperty("--md-indicator-x",`${f.x}px`),e.style.setProperty("--md-indicator-width",`${u}px`);let h=pr(o);(f.xh.x+l.width)&&o.scrollTo({left:Math.max(0,f.x-16),behavior:"smooth"})},complete(){e.style.removeProperty("--md-indicator-x"),e.style.removeProperty("--md-indicator-width")}}),z([De(o),ge(o)]).pipe(U(p)).subscribe(([c,l])=>{let f=Tt(o);i.hidden=c.x<16,a.hidden=c.x>f.width-l.width-16}),S(d(i,"click").pipe(m(()=>-1)),d(a,"click").pipe(m(()=>1))).pipe(U(p)).subscribe(c=>{let{width:l}=ce(o);o.scrollBy({left:l*c,behavior:"smooth"})}),r.pipe(U(p),b(c=>n.includes(c))).subscribe(c=>c.click()),o.classList.add("tabbed-labels--linked");for(let c of n){let l=P(`label[for="${c.id}"]`);l.replaceChildren(x("a",{href:`#${l.htmlFor}`,tabIndex:-1},...Array.from(l.childNodes))),d(l.firstElementChild,"click").pipe(U(p),b(f=>!(f.metaKey||f.ctrlKey)),E(f=>{f.preventDefault(),f.stopPropagation()})).subscribe(()=>{history.replaceState({},"",`#${l.htmlFor}`),l.click()})}return B("content.tabs.link")&&s.pipe(Ce(1),ee(t)).subscribe(([{active:c},{offset:l}])=>{let 
f=c.innerText.trim();if(c.hasAttribute("data-md-switching"))c.removeAttribute("data-md-switching");else{let u=e.offsetTop-l.y;for(let w of $("[data-tabs]"))for(let A of $(":scope > input",w)){let te=P(`label[for="${A.id}"]`);if(te!==c&&te.innerText.trim()===f){te.setAttribute("data-md-switching",""),A.click();break}}window.scrollTo({top:e.offsetTop-u});let h=__md_get("__tabs")||[];__md_set("__tabs",[...new Set([f,...h])])}}),s.pipe(U(p)).subscribe(()=>{for(let c of $("audio, video",e))c.pause()}),Na(n).pipe(E(c=>s.next(c)),L(()=>s.complete()),m(c=>R({ref:e},c)))}).pipe(Qe(se))}function Un(e,{viewport$:t,target$:r,print$:o}){return S(...$(".annotate:not(.highlight)",e).map(n=>Cn(n,{target$:r,print$:o})),...$("pre:not(.mermaid) > code",e).map(n=>$n(n,{target$:r,print$:o})),...$("pre.mermaid",e).map(n=>In(n)),...$("table:not([class])",e).map(n=>jn(n)),...$("details",e).map(n=>Pn(n,{target$:r,print$:o})),...$("[data-tabs]",e).map(n=>Wn(n,{viewport$:t,target$:r})),...$("[title]",e).filter(()=>B("content.tooltips")).map(n=>lt(n,{viewport$:t})))}function za(e,{alert$:t}){return t.pipe(v(r=>S(I(!0),I(!1).pipe(Ge(2e3))).pipe(m(o=>({message:r,active:o})))))}function Dn(e,t){let r=P(".md-typeset",e);return C(()=>{let o=new g;return o.subscribe(({message:n,active:i})=>{e.classList.toggle("md-dialog--active",i),r.textContent=n}),za(e,t).pipe(E(n=>o.next(n)),L(()=>o.complete()),m(n=>R({ref:e},n)))})}var qa=0;function Qa(e,t){document.body.append(e);let{width:r}=ce(e);e.style.setProperty("--md-tooltip-width",`${r}px`),e.remove();let o=cr(t),n=typeof o!="undefined"?De(o):I({x:0,y:0}),i=S(et(t),kt(t)).pipe(K());return z([i,n]).pipe(m(([a,s])=>{let{x:p,y:c}=Ue(t),l=ce(t),f=t.closest("table");return f&&t.parentElement&&(p+=f.offsetLeft+t.parentElement.offsetLeft,c+=f.offsetTop+t.parentElement.offsetTop),{active:a,offset:{x:p-s.x+l.width/2-r/2,y:c-s.y+l.height+8}}}))}function Vn(e){let t=e.title;if(!t.length)return O;let r=`__tooltip_${qa++}`,o=Pt(r,"inline"),n=P(".md-typeset",o);return n.innerHTML=t,C(()=>{let i=new g;return i.subscribe({next({offset:a}){o.style.setProperty("--md-tooltip-x",`${a.x}px`),o.style.setProperty("--md-tooltip-y",`${a.y}px`)},complete(){o.style.removeProperty("--md-tooltip-x"),o.style.removeProperty("--md-tooltip-y")}}),S(i.pipe(b(({active:a})=>a)),i.pipe(_e(250),b(({active:a})=>!a))).subscribe({next({active:a}){a?(e.insertAdjacentElement("afterend",o),e.setAttribute("aria-describedby",r),e.removeAttribute("title")):(o.remove(),e.removeAttribute("aria-describedby"),e.setAttribute("title",t))},complete(){o.remove(),e.removeAttribute("aria-describedby"),e.setAttribute("title",t)}}),i.pipe(Le(16,me)).subscribe(({active:a})=>{o.classList.toggle("md-tooltip--active",a)}),i.pipe(ct(125,me),b(()=>!!e.offsetParent),m(()=>e.offsetParent.getBoundingClientRect()),m(({x:a})=>a)).subscribe({next(a){a?o.style.setProperty("--md-tooltip-0",`${-a}px`):o.style.removeProperty("--md-tooltip-0")},complete(){o.style.removeProperty("--md-tooltip-0")}}),Qa(o,e).pipe(E(a=>i.next(a)),L(()=>i.complete()),m(a=>R({ref:e},a)))}).pipe(Qe(se))}function Ka({viewport$:e}){if(!B("header.autohide"))return I(!1);let t=e.pipe(m(({offset:{y:n}})=>n),Ye(2,1),m(([n,i])=>[nMath.abs(i-n.y)>100),m(([,[n]])=>n),K()),o=Ve("search");return z([e,o]).pipe(m(([{offset:n},i])=>n.y>400&&!i),K(),v(n=>n?r:I(!1)),Q(!1))}function Nn(e,t){return C(()=>z([ge(e),Ka(t)])).pipe(m(([{height:r},o])=>({height:r,hidden:o})),K((r,o)=>r.height===o.height&&r.hidden===o.hidden),G(1))}function zn(e,{header$:t,main$:r}){return C(()=>{let o=new 
g,n=o.pipe(X(),ne(!0));o.pipe(Z("active"),We(t)).subscribe(([{active:a},{hidden:s}])=>{e.classList.toggle("md-header--shadow",a&&!s),e.hidden=s});let i=ue($("[title]",e)).pipe(b(()=>B("content.tooltips")),oe(a=>Vn(a)));return r.subscribe(o),t.pipe(U(n),m(a=>R({ref:e},a)),Pe(i.pipe(U(n))))})}function Ya(e,{viewport$:t,header$:r}){return mr(e,{viewport$:t,header$:r}).pipe(m(({offset:{y:o}})=>{let{height:n}=ce(e);return{active:o>=n}}),Z("active"))}function qn(e,t){return C(()=>{let r=new g;r.subscribe({next({active:n}){e.classList.toggle("md-header__title--active",n)},complete(){e.classList.remove("md-header__title--active")}});let o=fe(".md-content h1");return typeof o=="undefined"?O:Ya(o,t).pipe(E(n=>r.next(n)),L(()=>r.complete()),m(n=>R({ref:e},n)))})}function Qn(e,{viewport$:t,header$:r}){let o=r.pipe(m(({height:i})=>i),K()),n=o.pipe(v(()=>ge(e).pipe(m(({height:i})=>({top:e.offsetTop,bottom:e.offsetTop+i})),Z("bottom"))));return z([o,n,t]).pipe(m(([i,{top:a,bottom:s},{offset:{y:p},size:{height:c}}])=>(c=Math.max(0,c-Math.max(0,a-p,i)-Math.max(0,c+p-s)),{offset:a-i,height:c,active:a-i<=p})),K((i,a)=>i.offset===a.offset&&i.height===a.height&&i.active===a.active))}function Ba(e){let t=__md_get("__palette")||{index:e.findIndex(o=>matchMedia(o.getAttribute("data-md-color-media")).matches)},r=Math.max(0,Math.min(t.index,e.length-1));return I(...e).pipe(oe(o=>d(o,"change").pipe(m(()=>o))),Q(e[r]),m(o=>({index:e.indexOf(o),color:{media:o.getAttribute("data-md-color-media"),scheme:o.getAttribute("data-md-color-scheme"),primary:o.getAttribute("data-md-color-primary"),accent:o.getAttribute("data-md-color-accent")}})),G(1))}function Kn(e){let t=$("input",e),r=x("meta",{name:"theme-color"});document.head.appendChild(r);let o=x("meta",{name:"color-scheme"});document.head.appendChild(o);let n=$t("(prefers-color-scheme: light)");return C(()=>{let i=new g;return i.subscribe(a=>{if(document.body.setAttribute("data-md-color-switching",""),a.color.media==="(prefers-color-scheme)"){let s=matchMedia("(prefers-color-scheme: light)"),p=document.querySelector(s.matches?"[data-md-color-media='(prefers-color-scheme: light)']":"[data-md-color-media='(prefers-color-scheme: dark)']");a.color.scheme=p.getAttribute("data-md-color-scheme"),a.color.primary=p.getAttribute("data-md-color-primary"),a.color.accent=p.getAttribute("data-md-color-accent")}for(let[s,p]of Object.entries(a.color))document.body.setAttribute(`data-md-color-${s}`,p);for(let s=0;sa.key==="Enter"),ee(i,(a,s)=>s)).subscribe(({index:a})=>{a=(a+1)%t.length,t[a].click(),t[a].focus()}),i.pipe(m(()=>{let a=Se("header"),s=window.getComputedStyle(a);return o.content=s.colorScheme,s.backgroundColor.match(/\d+/g).map(p=>(+p).toString(16).padStart(2,"0")).join("")})).subscribe(a=>r.content=`#${a}`),i.pipe(be(se)).subscribe(()=>{document.body.removeAttribute("data-md-color-switching")}),Ba(t).pipe(U(n.pipe(Ce(1))),st(),E(a=>i.next(a)),L(()=>i.complete()),m(a=>R({ref:e},a)))})}function Yn(e,{progress$:t}){return C(()=>{let r=new g;return r.subscribe(({value:o})=>{e.style.setProperty("--md-progress-value",`${o}`)}),t.pipe(E(o=>r.next({value:o})),L(()=>r.complete()),m(o=>({ref:e,value:o})))})}var Gr=Vt(Yr());function Ga(e){e.setAttribute("data-md-copying","");let t=e.closest("[data-copy]"),r=t?t.getAttribute("data-copy"):e.innerText;return e.removeAttribute("data-md-copying"),r.trimEnd()}function Bn({alert$:e}){Gr.default.isSupported()&&new F(t=>{new Gr.default("[data-clipboard-target], 
[data-clipboard-text]",{text:r=>r.getAttribute("data-clipboard-text")||Ga(P(r.getAttribute("data-clipboard-target")))}).on("success",r=>t.next(r))}).pipe(E(t=>{t.trigger.focus()}),m(()=>Ee("clipboard.copied"))).subscribe(e)}function Gn(e,t){return e.protocol=t.protocol,e.hostname=t.hostname,e}function Ja(e,t){let r=new Map;for(let o of $("url",e)){let n=P("loc",o),i=[Gn(new URL(n.textContent),t)];r.set(`${i[0]}`,i);for(let a of $("[rel=alternate]",o)){let s=a.getAttribute("href");s!=null&&i.push(Gn(new URL(s),t))}}return r}function ur(e){return mn(new URL("sitemap.xml",e)).pipe(m(t=>Ja(t,new URL(e))),ve(()=>I(new Map)))}function Xa(e,t){if(!(e.target instanceof Element))return O;let r=e.target.closest("a");if(r===null)return O;if(r.target||e.metaKey||e.ctrlKey)return O;let o=new URL(r.href);return o.search=o.hash="",t.has(`${o}`)?(e.preventDefault(),I(new URL(r.href))):O}function Jn(e){let t=new Map;for(let r of $(":scope > *",e.head))t.set(r.outerHTML,r);return t}function Xn(e){for(let t of $("[href], [src]",e))for(let r of["href","src"]){let o=t.getAttribute(r);if(o&&!/^(?:[a-z]+:)?\/\//i.test(o)){t[r]=t[r];break}}return I(e)}function Za(e){for(let o of["[data-md-component=announce]","[data-md-component=container]","[data-md-component=header-topic]","[data-md-component=outdated]","[data-md-component=logo]","[data-md-component=skip]",...B("navigation.tabs.sticky")?["[data-md-component=tabs]"]:[]]){let n=fe(o),i=fe(o,e);typeof n!="undefined"&&typeof i!="undefined"&&n.replaceWith(i)}let t=Jn(document);for(let[o,n]of Jn(e))t.has(o)?t.delete(o):document.head.appendChild(n);for(let o of t.values()){let n=o.getAttribute("name");n!=="theme-color"&&n!=="color-scheme"&&o.remove()}let r=Se("container");return je($("script",r)).pipe(v(o=>{let n=e.createElement("script");if(o.src){for(let i of o.getAttributeNames())n.setAttribute(i,o.getAttribute(i));return o.replaceWith(n),new F(i=>{n.onload=()=>i.complete()})}else return n.textContent=o.textContent,o.replaceWith(n),O}),X(),ne(document))}function Zn({location$:e,viewport$:t,progress$:r}){let o=ye();if(location.protocol==="file:")return O;let n=ur(o.base);I(document).subscribe(Xn);let i=d(document.body,"click").pipe(We(n),v(([p,c])=>Xa(p,c)),pe()),a=d(window,"popstate").pipe(m(xe),pe());i.pipe(ee(t)).subscribe(([p,{offset:c}])=>{history.replaceState(c,""),history.pushState(null,"",p)}),S(i,a).subscribe(e);let s=e.pipe(Z("pathname"),v(p=>ln(p,{progress$:r}).pipe(ve(()=>(pt(p,!0),O)))),v(Xn),v(Za),pe());return S(s.pipe(ee(e,(p,c)=>c)),s.pipe(v(()=>e),Z("pathname"),v(()=>e),Z("hash")),e.pipe(K((p,c)=>p.pathname===c.pathname&&p.hash===c.hash),v(()=>i),E(()=>history.back()))).subscribe(p=>{var c,l;history.state!==null||!p.hash?window.scrollTo(0,(l=(c=history.state)==null?void 0:c.y)!=null?l:0):(history.scrollRestoration="auto",sn(p.hash),history.scrollRestoration="manual")}),e.subscribe(()=>{history.scrollRestoration="manual"}),d(window,"beforeunload").subscribe(()=>{history.scrollRestoration="auto"}),t.pipe(Z("offset"),_e(100)).subscribe(({offset:p})=>{history.replaceState(p,"")}),s}var ri=Vt(ti());function oi(e){let t=e.separator.split("|").map(n=>n.replace(/(\(\?[!=<][^)]+\))/g,"").length===0?"\uFFFD":n).join("|"),r=new RegExp(t,"img"),o=(n,i,a)=>`${i}${a}`;return n=>{n=n.replace(/[\s*+\-:~^]+/g," ").trim();let i=new RegExp(`(^|${e.separator}|)(${n.replace(/[|\\{}()[\]^$+*?.-]/g,"\\$&").replace(r,"|")})`,"img");return a=>(0,ri.default)(a).replace(i,o).replace(/<\/mark>(\s+)]*>/img,"$1")}}function It(e){return e.type===1}function dr(e){return 
e.type===3}function ni(e,t){let r=vn(e);return S(I(location.protocol!=="file:"),Ve("search")).pipe(Ae(o=>o),v(()=>t)).subscribe(({config:o,docs:n})=>r.next({type:0,data:{config:o,docs:n,options:{suggest:B("search.suggest")}}})),r}function ii({document$:e}){let t=ye(),r=Ne(new URL("../versions.json",t.base)).pipe(ve(()=>O)),o=r.pipe(m(n=>{let[,i]=t.base.match(/([^/]+)\/?$/);return n.find(({version:a,aliases:s})=>a===i||s.includes(i))||n[0]}));r.pipe(m(n=>new Map(n.map(i=>[`${new URL(`../${i.version}/`,t.base)}`,i]))),v(n=>d(document.body,"click").pipe(b(i=>!i.metaKey&&!i.ctrlKey),ee(o),v(([i,a])=>{if(i.target instanceof Element){let s=i.target.closest("a");if(s&&!s.target&&n.has(s.href)){let p=s.href;return!i.target.closest(".md-version")&&n.get(p)===a?O:(i.preventDefault(),I(p))}}return O}),v(i=>ur(new URL(i)).pipe(m(a=>{let p=xe().href.replace(t.base,i);return a.has(p.split("#")[0])?new URL(p):new URL(i)})))))).subscribe(n=>pt(n,!0)),z([r,o]).subscribe(([n,i])=>{P(".md-header__topic").appendChild(Mn(n,i))}),e.pipe(v(()=>o)).subscribe(n=>{var a;let i=__md_get("__outdated",sessionStorage);if(i===null){i=!0;let s=((a=t.version)==null?void 0:a.default)||"latest";Array.isArray(s)||(s=[s]);e:for(let p of s)for(let c of n.aliases.concat(n.version))if(new RegExp(p,"i").test(c)){i=!1;break e}__md_set("__outdated",i,sessionStorage)}if(i)for(let s of ae("outdated"))s.hidden=!1})}function ns(e,{worker$:t}){let{searchParams:r}=xe();r.has("q")&&(Je("search",!0),e.value=r.get("q"),e.focus(),Ve("search").pipe(Ae(i=>!i)).subscribe(()=>{let i=xe();i.searchParams.delete("q"),history.replaceState({},"",`${i}`)}));let o=et(e),n=S(t.pipe(Ae(It)),d(e,"keyup"),o).pipe(m(()=>e.value),K());return z([n,o]).pipe(m(([i,a])=>({value:i,focus:a})),G(1))}function ai(e,{worker$:t}){let r=new g,o=r.pipe(X(),ne(!0));z([t.pipe(Ae(It)),r],(i,a)=>a).pipe(Z("value")).subscribe(({value:i})=>t.next({type:2,data:i})),r.pipe(Z("focus")).subscribe(({focus:i})=>{i&&Je("search",i)}),d(e.form,"reset").pipe(U(o)).subscribe(()=>e.focus());let n=P("header [for=__search]");return d(n,"click").subscribe(()=>e.focus()),ns(e,{worker$:t}).pipe(E(i=>r.next(i)),L(()=>r.complete()),m(i=>R({ref:e},i)),G(1))}function si(e,{worker$:t,query$:r}){let o=new g,n=tn(e.parentElement).pipe(b(Boolean)),i=e.parentElement,a=P(":scope > :first-child",e),s=P(":scope > :last-child",e);Ve("search").subscribe(l=>s.setAttribute("role",l?"list":"presentation")),o.pipe(ee(r),Ur(t.pipe(Ae(It)))).subscribe(([{items:l},{value:f}])=>{switch(l.length){case 0:a.textContent=f.length?Ee("search.result.none"):Ee("search.result.placeholder");break;case 1:a.textContent=Ee("search.result.one");break;default:let u=sr(l.length);a.textContent=Ee("search.result.other",u)}});let p=o.pipe(E(()=>s.innerHTML=""),v(({items:l})=>S(I(...l.slice(0,10)),I(...l.slice(10)).pipe(Ye(4),Vr(n),v(([f])=>f)))),m(Tn),pe());return p.subscribe(l=>s.appendChild(l)),p.pipe(oe(l=>{let f=fe("details",l);return typeof f=="undefined"?O:d(f,"toggle").pipe(U(o),m(()=>f))})).subscribe(l=>{l.open===!1&&l.offsetTop<=i.scrollTop&&i.scrollTo({top:l.offsetTop})}),t.pipe(b(dr),m(({data:l})=>l)).pipe(E(l=>o.next(l)),L(()=>o.complete()),m(l=>R({ref:e},l)))}function is(e,{query$:t}){return t.pipe(m(({value:r})=>{let o=xe();return o.hash="",r=r.replace(/\s+/g,"+").replace(/&/g,"%26").replace(/=/g,"%3D"),o.search=`q=${r}`,{url:o}}))}function ci(e,t){let r=new g,o=r.pipe(X(),ne(!0));return 
r.subscribe(({url:n})=>{e.setAttribute("data-clipboard-text",e.href),e.href=`${n}`}),d(e,"click").pipe(U(o)).subscribe(n=>n.preventDefault()),is(e,t).pipe(E(n=>r.next(n)),L(()=>r.complete()),m(n=>R({ref:e},n)))}function pi(e,{worker$:t,keyboard$:r}){let o=new g,n=Se("search-query"),i=S(d(n,"keydown"),d(n,"focus")).pipe(be(se),m(()=>n.value),K());return o.pipe(We(i),m(([{suggest:s},p])=>{let c=p.split(/([\s-]+)/);if(s!=null&&s.length&&c[c.length-1]){let l=s[s.length-1];l.startsWith(c[c.length-1])&&(c[c.length-1]=l)}else c.length=0;return c})).subscribe(s=>e.innerHTML=s.join("").replace(/\s/g," ")),r.pipe(b(({mode:s})=>s==="search")).subscribe(s=>{switch(s.type){case"ArrowRight":e.innerText.length&&n.selectionStart===n.value.length&&(n.value=e.innerText);break}}),t.pipe(b(dr),m(({data:s})=>s)).pipe(E(s=>o.next(s)),L(()=>o.complete()),m(()=>({ref:e})))}function li(e,{index$:t,keyboard$:r}){let o=ye();try{let n=ni(o.search,t),i=Se("search-query",e),a=Se("search-result",e);d(e,"click").pipe(b(({target:p})=>p instanceof Element&&!!p.closest("a"))).subscribe(()=>Je("search",!1)),r.pipe(b(({mode:p})=>p==="search")).subscribe(p=>{let c=Re();switch(p.type){case"Enter":if(c===i){let l=new Map;for(let f of $(":first-child [href]",a)){let u=f.firstElementChild;l.set(f,parseFloat(u.getAttribute("data-md-score")))}if(l.size){let[[f]]=[...l].sort(([,u],[,h])=>h-u);f.click()}p.claim()}break;case"Escape":case"Tab":Je("search",!1),i.blur();break;case"ArrowUp":case"ArrowDown":if(typeof c=="undefined")i.focus();else{let l=[i,...$(":not(details) > [href], summary, details[open] [href]",a)],f=Math.max(0,(Math.max(0,l.indexOf(c))+l.length+(p.type==="ArrowUp"?-1:1))%l.length);l[f].focus()}p.claim();break;default:i!==Re()&&i.focus()}}),r.pipe(b(({mode:p})=>p==="global")).subscribe(p=>{switch(p.type){case"f":case"s":case"/":i.focus(),i.select(),p.claim();break}});let s=ai(i,{worker$:n});return S(s,si(a,{worker$:n,query$:s})).pipe(Pe(...ae("search-share",e).map(p=>ci(p,{query$:s})),...ae("search-suggest",e).map(p=>pi(p,{worker$:n,keyboard$:r}))))}catch(n){return e.hidden=!0,Ke}}function mi(e,{index$:t,location$:r}){return z([t,r.pipe(Q(xe()),b(o=>!!o.searchParams.get("h")))]).pipe(m(([o,n])=>oi(o.config)(n.searchParams.get("h"))),m(o=>{var a;let n=new Map,i=document.createNodeIterator(e,NodeFilter.SHOW_TEXT);for(let s=i.nextNode();s;s=i.nextNode())if((a=s.parentElement)!=null&&a.offsetHeight){let p=s.textContent,c=o(p);c.length>p.length&&n.set(s,c)}for(let[s,p]of n){let{childNodes:c}=x("span",null,p);s.replaceWith(...Array.from(c))}return{ref:e,nodes:n}}))}function as(e,{viewport$:t,main$:r}){let o=e.closest(".md-grid"),n=o.offsetTop-o.parentElement.offsetTop;return z([r,t]).pipe(m(([{offset:i,height:a},{offset:{y:s}}])=>(a=a+Math.min(n,Math.max(0,s-i))-n,{height:a,locked:s>=i+n})),K((i,a)=>i.height===a.height&&i.locked===a.locked))}function Jr(e,o){var n=o,{header$:t}=n,r=io(n,["header$"]);let i=P(".md-sidebar__scrollwrap",e),{y:a}=Ue(i);return C(()=>{let s=new g,p=s.pipe(X(),ne(!0)),c=s.pipe(Le(0,me));return c.pipe(ee(t)).subscribe({next([{height:l},{height:f}]){i.style.height=`${l-2*a}px`,e.style.top=`${f}px`},complete(){i.style.height="",e.style.top=""}}),c.pipe(Ae()).subscribe(()=>{for(let l of $(".md-nav__link--active[href]",e)){if(!l.clientHeight)continue;let f=l.closest(".md-sidebar__scrollwrap");if(typeof f!="undefined"){let u=l.offsetTop-f.offsetTop,{height:h}=ce(f);f.scrollTo({top:u-h/2})}}}),ue($("label[tabindex]",e)).pipe(oe(l=>d(l,"click").pipe(be(se),m(()=>l),U(p)))).subscribe(l=>{let 
f=P(`[id="${l.htmlFor}"]`);P(`[aria-labelledby="${l.id}"]`).setAttribute("aria-expanded",`${f.checked}`)}),as(e,r).pipe(E(l=>s.next(l)),L(()=>s.complete()),m(l=>R({ref:e},l)))})}function fi(e,t){if(typeof t!="undefined"){let r=`https://api.github.com/repos/${e}/${t}`;return Ct(Ne(`${r}/releases/latest`).pipe(ve(()=>O),m(o=>({version:o.tag_name})),Be({})),Ne(r).pipe(ve(()=>O),m(o=>({stars:o.stargazers_count,forks:o.forks_count})),Be({}))).pipe(m(([o,n])=>R(R({},o),n)))}else{let r=`https://api.github.com/users/${e}`;return Ne(r).pipe(m(o=>({repositories:o.public_repos})),Be({}))}}function ui(e,t){let r=`https://${e}/api/v4/projects/${encodeURIComponent(t)}`;return Ne(r).pipe(ve(()=>O),m(({star_count:o,forks_count:n})=>({stars:o,forks:n})),Be({}))}function di(e){let t=e.match(/^.+github\.com\/([^/]+)\/?([^/]+)?/i);if(t){let[,r,o]=t;return fi(r,o)}if(t=e.match(/^.+?([^/]*gitlab[^/]+)\/(.+?)\/?$/i),t){let[,r,o]=t;return ui(r,o)}return O}var ss;function cs(e){return ss||(ss=C(()=>{let t=__md_get("__source",sessionStorage);if(t)return I(t);if(ae("consent").length){let o=__md_get("__consent");if(!(o&&o.github))return O}return di(e.href).pipe(E(o=>__md_set("__source",o,sessionStorage)))}).pipe(ve(()=>O),b(t=>Object.keys(t).length>0),m(t=>({facts:t})),G(1)))}function hi(e){let t=P(":scope > :last-child",e);return C(()=>{let r=new g;return r.subscribe(({facts:o})=>{t.appendChild(Sn(o)),t.classList.add("md-source__repository--active")}),cs(e).pipe(E(o=>r.next(o)),L(()=>r.complete()),m(o=>R({ref:e},o)))})}function ps(e,{viewport$:t,header$:r}){return ge(document.body).pipe(v(()=>mr(e,{header$:r,viewport$:t})),m(({offset:{y:o}})=>({hidden:o>=10})),Z("hidden"))}function bi(e,t){return C(()=>{let r=new g;return r.subscribe({next({hidden:o}){e.hidden=o},complete(){e.hidden=!1}}),(B("navigation.tabs.sticky")?I({hidden:!1}):ps(e,t)).pipe(E(o=>r.next(o)),L(()=>r.complete()),m(o=>R({ref:e},o)))})}function ls(e,{viewport$:t,header$:r}){let o=new Map,n=$(".md-nav__link",e);for(let s of n){let p=decodeURIComponent(s.hash.substring(1)),c=fe(`[id="${p}"]`);typeof c!="undefined"&&o.set(s,c)}let i=r.pipe(Z("height"),m(({height:s})=>{let p=Se("main"),c=P(":scope > :first-child",p);return s+.8*(c.offsetTop-p.offsetTop)}),pe());return ge(document.body).pipe(Z("height"),v(s=>C(()=>{let p=[];return I([...o].reduce((c,[l,f])=>{for(;p.length&&o.get(p[p.length-1]).tagName>=f.tagName;)p.pop();let u=f.offsetTop;for(;!u&&f.parentElement;)f=f.parentElement,u=f.offsetTop;let h=f.offsetParent;for(;h;h=h.offsetParent)u+=h.offsetTop;return c.set([...p=[...p,l]].reverse(),u)},new Map))}).pipe(m(p=>new Map([...p].sort(([,c],[,l])=>c-l))),We(i),v(([p,c])=>t.pipe(jr(([l,f],{offset:{y:u},size:h})=>{let w=u+h.height>=Math.floor(s.height);for(;f.length;){let[,A]=f[0];if(A-c=u&&!w)f=[l.pop(),...f];else break}return[l,f]},[[],[...p]]),K((l,f)=>l[0]===f[0]&&l[1]===f[1])))))).pipe(m(([s,p])=>({prev:s.map(([c])=>c),next:p.map(([c])=>c)})),Q({prev:[],next:[]}),Ye(2,1),m(([s,p])=>s.prev.length{let i=new g,a=i.pipe(X(),ne(!0));if(i.subscribe(({prev:s,next:p})=>{for(let[c]of p)c.classList.remove("md-nav__link--passed"),c.classList.remove("md-nav__link--active");for(let[c,[l]]of s.entries())l.classList.add("md-nav__link--passed"),l.classList.toggle("md-nav__link--active",c===s.length-1)}),B("toc.follow")){let s=S(t.pipe(_e(1),m(()=>{})),t.pipe(_e(250),m(()=>"smooth")));i.pipe(b(({prev:p})=>p.length>0),We(o.pipe(be(se))),ee(s)).subscribe(([[{prev:p}],c])=>{let[l]=p[p.length-1];if(l.offsetHeight){let f=cr(l);if(typeof f!="undefined"){let 
u=l.offsetTop-f.offsetTop,{height:h}=ce(f);f.scrollTo({top:u-h/2,behavior:c})}}})}return B("navigation.tracking")&&t.pipe(U(a),Z("offset"),_e(250),Ce(1),U(n.pipe(Ce(1))),st({delay:250}),ee(i)).subscribe(([,{prev:s}])=>{let p=xe(),c=s[s.length-1];if(c&&c.length){let[l]=c,{hash:f}=new URL(l.href);p.hash!==f&&(p.hash=f,history.replaceState({},"",`${p}`))}else p.hash="",history.replaceState({},"",`${p}`)}),ls(e,{viewport$:t,header$:r}).pipe(E(s=>i.next(s)),L(()=>i.complete()),m(s=>R({ref:e},s)))})}function ms(e,{viewport$:t,main$:r,target$:o}){let n=t.pipe(m(({offset:{y:a}})=>a),Ye(2,1),m(([a,s])=>a>s&&s>0),K()),i=r.pipe(m(({active:a})=>a));return z([i,n]).pipe(m(([a,s])=>!(a&&s)),K(),U(o.pipe(Ce(1))),ne(!0),st({delay:250}),m(a=>({hidden:a})))}function gi(e,{viewport$:t,header$:r,main$:o,target$:n}){let i=new g,a=i.pipe(X(),ne(!0));return i.subscribe({next({hidden:s}){e.hidden=s,s?(e.setAttribute("tabindex","-1"),e.blur()):e.removeAttribute("tabindex")},complete(){e.style.top="",e.hidden=!0,e.removeAttribute("tabindex")}}),r.pipe(U(a),Z("height")).subscribe(({height:s})=>{e.style.top=`${s+16}px`}),d(e,"click").subscribe(s=>{s.preventDefault(),window.scrollTo({top:0})}),ms(e,{viewport$:t,main$:o,target$:n}).pipe(E(s=>i.next(s)),L(()=>i.complete()),m(s=>R({ref:e},s)))}function xi({document$:e,viewport$:t}){e.pipe(v(()=>$(".md-ellipsis")),oe(r=>tt(r).pipe(U(e.pipe(Ce(1))),b(o=>o),m(()=>r),Te(1))),b(r=>r.offsetWidth{let o=r.innerText,n=r.closest("a")||r;return n.title=o,B("content.tooltips")?lt(n,{viewport$:t}).pipe(U(e.pipe(Ce(1))),L(()=>n.removeAttribute("title"))):O})).subscribe(),B("content.tooltips")&&e.pipe(v(()=>$(".md-status")),oe(r=>lt(r,{viewport$:t}))).subscribe()}function yi({document$:e,tablet$:t}){e.pipe(v(()=>$(".md-toggle--indeterminate")),E(r=>{r.indeterminate=!0,r.checked=!1}),oe(r=>d(r,"change").pipe(Dr(()=>r.classList.contains("md-toggle--indeterminate")),m(()=>r))),ee(t)).subscribe(([r,o])=>{r.classList.remove("md-toggle--indeterminate"),o&&(r.checked=!1)})}function fs(){return/(iPad|iPhone|iPod)/.test(navigator.userAgent)}function Ei({document$:e}){e.pipe(v(()=>$("[data-md-scrollfix]")),E(t=>t.removeAttribute("data-md-scrollfix")),b(fs),oe(t=>d(t,"touchstart").pipe(m(()=>t)))).subscribe(t=>{let r=t.scrollTop;r===0?t.scrollTop=1:r+t.offsetHeight===t.scrollHeight&&(t.scrollTop=r-1)})}function wi({viewport$:e,tablet$:t}){z([Ve("search"),t]).pipe(m(([r,o])=>r&&!o),v(r=>I(r).pipe(Ge(r?400:100))),ee(e)).subscribe(([r,{offset:{y:o}}])=>{if(r)document.body.setAttribute("data-md-scrolllock",""),document.body.style.top=`-${o}px`;else{let n=-1*parseInt(document.body.style.top,10);document.body.removeAttribute("data-md-scrolllock"),document.body.style.top="",n&&window.scrollTo(0,n)}})}Object.entries||(Object.entries=function(e){let t=[];for(let r of Object.keys(e))t.push([r,e[r]]);return t});Object.values||(Object.values=function(e){let t=[];for(let r of Object.keys(e))t.push(e[r]);return t});typeof Element!="undefined"&&(Element.prototype.scrollTo||(Element.prototype.scrollTo=function(e,t){typeof e=="object"?(this.scrollLeft=e.left,this.scrollTop=e.top):(this.scrollLeft=e,this.scrollTop=t)}),Element.prototype.replaceWith||(Element.prototype.replaceWith=function(...e){let t=this.parentNode;if(t){e.length===0&&t.removeChild(this);for(let r=e.length-1;r>=0;r--){let o=e[r];typeof o=="string"?o=document.createTextNode(o):o.parentNode&&o.parentNode.removeChild(o),r?t.insertBefore(this.previousSibling,o):t.replaceChild(o,this)}}}));function us(){return location.protocol==="file:"?wt(`${new 
URL("search/search_index.js",Xr.base)}`).pipe(m(()=>__index),G(1)):Ne(new URL("search/search_index.json",Xr.base))}document.documentElement.classList.remove("no-js");document.documentElement.classList.add("js");var ot=Yo(),jt=nn(),Ot=cn(jt),Zr=on(),Oe=bn(),hr=$t("(min-width: 960px)"),Si=$t("(min-width: 1220px)"),Oi=pn(),Xr=ye(),Mi=document.forms.namedItem("search")?us():Ke,eo=new g;Bn({alert$:eo});var to=new g;B("navigation.instant")&&Zn({location$:jt,viewport$:Oe,progress$:to}).subscribe(ot);var Ti;((Ti=Xr.version)==null?void 0:Ti.provider)==="mike"&&ii({document$:ot});S(jt,Ot).pipe(Ge(125)).subscribe(()=>{Je("drawer",!1),Je("search",!1)});Zr.pipe(b(({mode:e})=>e==="global")).subscribe(e=>{switch(e.type){case"p":case",":let t=fe("link[rel=prev]");typeof t!="undefined"&&pt(t);break;case"n":case".":let r=fe("link[rel=next]");typeof r!="undefined"&&pt(r);break;case"Enter":let o=Re();o instanceof HTMLLabelElement&&o.click()}});xi({viewport$:Oe,document$:ot});yi({document$:ot,tablet$:hr});Ei({document$:ot});wi({viewport$:Oe,tablet$:hr});var rt=Nn(Se("header"),{viewport$:Oe}),Ft=ot.pipe(m(()=>Se("main")),v(e=>Qn(e,{viewport$:Oe,header$:rt})),G(1)),ds=S(...ae("consent").map(e=>xn(e,{target$:Ot})),...ae("dialog").map(e=>Dn(e,{alert$:eo})),...ae("header").map(e=>zn(e,{viewport$:Oe,header$:rt,main$:Ft})),...ae("palette").map(e=>Kn(e)),...ae("progress").map(e=>Yn(e,{progress$:to})),...ae("search").map(e=>li(e,{index$:Mi,keyboard$:Zr})),...ae("source").map(e=>hi(e))),hs=C(()=>S(...ae("announce").map(e=>gn(e)),...ae("content").map(e=>Un(e,{viewport$:Oe,target$:Ot,print$:Oi})),...ae("content").map(e=>B("search.highlight")?mi(e,{index$:Mi,location$:jt}):O),...ae("header-title").map(e=>qn(e,{viewport$:Oe,header$:rt})),...ae("sidebar").map(e=>e.getAttribute("data-md-type")==="navigation"?Nr(Si,()=>Jr(e,{viewport$:Oe,header$:rt,main$:Ft})):Nr(hr,()=>Jr(e,{viewport$:Oe,header$:rt,main$:Ft}))),...ae("tabs").map(e=>bi(e,{viewport$:Oe,header$:rt})),...ae("toc").map(e=>vi(e,{viewport$:Oe,header$:rt,main$:Ft,target$:Ot})),...ae("top").map(e=>gi(e,{viewport$:Oe,header$:rt,main$:Ft,target$:Ot})))),Li=ot.pipe(v(()=>hs),Pe(ds),G(1));Li.subscribe();window.document$=ot;window.location$=jt;window.target$=Ot;window.keyboard$=Zr;window.viewport$=Oe;window.tablet$=hr;window.screen$=Si;window.print$=Oi;window.alert$=eo;window.progress$=to;window.component$=Li;})(); +//# sourceMappingURL=bundle.fe8b6f2b.min.js.map + diff --git a/assets/javascripts/bundle.fe8b6f2b.min.js.map b/assets/javascripts/bundle.fe8b6f2b.min.js.map new file mode 100644 index 00000000..82635852 --- /dev/null +++ b/assets/javascripts/bundle.fe8b6f2b.min.js.map @@ -0,0 +1,7 @@ +{ + "version": 3, + "sources": ["node_modules/focus-visible/dist/focus-visible.js", "node_modules/clipboard/dist/clipboard.js", "node_modules/escape-html/index.js", "src/templates/assets/javascripts/bundle.ts", "node_modules/rxjs/node_modules/tslib/tslib.es6.js", "node_modules/rxjs/src/internal/util/isFunction.ts", "node_modules/rxjs/src/internal/util/createErrorClass.ts", "node_modules/rxjs/src/internal/util/UnsubscriptionError.ts", "node_modules/rxjs/src/internal/util/arrRemove.ts", "node_modules/rxjs/src/internal/Subscription.ts", "node_modules/rxjs/src/internal/config.ts", "node_modules/rxjs/src/internal/scheduler/timeoutProvider.ts", "node_modules/rxjs/src/internal/util/reportUnhandledError.ts", "node_modules/rxjs/src/internal/util/noop.ts", "node_modules/rxjs/src/internal/NotificationFactories.ts", "node_modules/rxjs/src/internal/util/errorContext.ts", 
"node_modules/rxjs/src/internal/Subscriber.ts", "node_modules/rxjs/src/internal/symbol/observable.ts", "node_modules/rxjs/src/internal/util/identity.ts", "node_modules/rxjs/src/internal/util/pipe.ts", "node_modules/rxjs/src/internal/Observable.ts", "node_modules/rxjs/src/internal/util/lift.ts", "node_modules/rxjs/src/internal/operators/OperatorSubscriber.ts", "node_modules/rxjs/src/internal/scheduler/animationFrameProvider.ts", "node_modules/rxjs/src/internal/util/ObjectUnsubscribedError.ts", "node_modules/rxjs/src/internal/Subject.ts", "node_modules/rxjs/src/internal/BehaviorSubject.ts", "node_modules/rxjs/src/internal/scheduler/dateTimestampProvider.ts", "node_modules/rxjs/src/internal/ReplaySubject.ts", "node_modules/rxjs/src/internal/scheduler/Action.ts", "node_modules/rxjs/src/internal/scheduler/intervalProvider.ts", "node_modules/rxjs/src/internal/scheduler/AsyncAction.ts", "node_modules/rxjs/src/internal/Scheduler.ts", "node_modules/rxjs/src/internal/scheduler/AsyncScheduler.ts", "node_modules/rxjs/src/internal/scheduler/async.ts", "node_modules/rxjs/src/internal/scheduler/QueueAction.ts", "node_modules/rxjs/src/internal/scheduler/QueueScheduler.ts", "node_modules/rxjs/src/internal/scheduler/queue.ts", "node_modules/rxjs/src/internal/scheduler/AnimationFrameAction.ts", "node_modules/rxjs/src/internal/scheduler/AnimationFrameScheduler.ts", "node_modules/rxjs/src/internal/scheduler/animationFrame.ts", "node_modules/rxjs/src/internal/observable/empty.ts", "node_modules/rxjs/src/internal/util/isScheduler.ts", "node_modules/rxjs/src/internal/util/args.ts", "node_modules/rxjs/src/internal/util/isArrayLike.ts", "node_modules/rxjs/src/internal/util/isPromise.ts", "node_modules/rxjs/src/internal/util/isInteropObservable.ts", "node_modules/rxjs/src/internal/util/isAsyncIterable.ts", "node_modules/rxjs/src/internal/util/throwUnobservableError.ts", "node_modules/rxjs/src/internal/symbol/iterator.ts", "node_modules/rxjs/src/internal/util/isIterable.ts", "node_modules/rxjs/src/internal/util/isReadableStreamLike.ts", "node_modules/rxjs/src/internal/observable/innerFrom.ts", "node_modules/rxjs/src/internal/util/executeSchedule.ts", "node_modules/rxjs/src/internal/operators/observeOn.ts", "node_modules/rxjs/src/internal/operators/subscribeOn.ts", "node_modules/rxjs/src/internal/scheduled/scheduleObservable.ts", "node_modules/rxjs/src/internal/scheduled/schedulePromise.ts", "node_modules/rxjs/src/internal/scheduled/scheduleArray.ts", "node_modules/rxjs/src/internal/scheduled/scheduleIterable.ts", "node_modules/rxjs/src/internal/scheduled/scheduleAsyncIterable.ts", "node_modules/rxjs/src/internal/scheduled/scheduleReadableStreamLike.ts", "node_modules/rxjs/src/internal/scheduled/scheduled.ts", "node_modules/rxjs/src/internal/observable/from.ts", "node_modules/rxjs/src/internal/observable/of.ts", "node_modules/rxjs/src/internal/observable/throwError.ts", "node_modules/rxjs/src/internal/util/EmptyError.ts", "node_modules/rxjs/src/internal/util/isDate.ts", "node_modules/rxjs/src/internal/operators/map.ts", "node_modules/rxjs/src/internal/util/mapOneOrManyArgs.ts", "node_modules/rxjs/src/internal/util/argsArgArrayOrObject.ts", "node_modules/rxjs/src/internal/util/createObject.ts", "node_modules/rxjs/src/internal/observable/combineLatest.ts", "node_modules/rxjs/src/internal/operators/mergeInternals.ts", "node_modules/rxjs/src/internal/operators/mergeMap.ts", "node_modules/rxjs/src/internal/operators/mergeAll.ts", "node_modules/rxjs/src/internal/operators/concatAll.ts", 
"node_modules/rxjs/src/internal/observable/concat.ts", "node_modules/rxjs/src/internal/observable/defer.ts", "node_modules/rxjs/src/internal/observable/fromEvent.ts", "node_modules/rxjs/src/internal/observable/fromEventPattern.ts", "node_modules/rxjs/src/internal/observable/timer.ts", "node_modules/rxjs/src/internal/observable/merge.ts", "node_modules/rxjs/src/internal/observable/never.ts", "node_modules/rxjs/src/internal/util/argsOrArgArray.ts", "node_modules/rxjs/src/internal/operators/filter.ts", "node_modules/rxjs/src/internal/observable/zip.ts", "node_modules/rxjs/src/internal/operators/audit.ts", "node_modules/rxjs/src/internal/operators/auditTime.ts", "node_modules/rxjs/src/internal/operators/bufferCount.ts", "node_modules/rxjs/src/internal/operators/catchError.ts", "node_modules/rxjs/src/internal/operators/scanInternals.ts", "node_modules/rxjs/src/internal/operators/combineLatest.ts", "node_modules/rxjs/src/internal/operators/combineLatestWith.ts", "node_modules/rxjs/src/internal/operators/debounce.ts", "node_modules/rxjs/src/internal/operators/debounceTime.ts", "node_modules/rxjs/src/internal/operators/defaultIfEmpty.ts", "node_modules/rxjs/src/internal/operators/take.ts", "node_modules/rxjs/src/internal/operators/ignoreElements.ts", "node_modules/rxjs/src/internal/operators/mapTo.ts", "node_modules/rxjs/src/internal/operators/delayWhen.ts", "node_modules/rxjs/src/internal/operators/delay.ts", "node_modules/rxjs/src/internal/operators/distinctUntilChanged.ts", "node_modules/rxjs/src/internal/operators/distinctUntilKeyChanged.ts", "node_modules/rxjs/src/internal/operators/throwIfEmpty.ts", "node_modules/rxjs/src/internal/operators/endWith.ts", "node_modules/rxjs/src/internal/operators/finalize.ts", "node_modules/rxjs/src/internal/operators/first.ts", "node_modules/rxjs/src/internal/operators/takeLast.ts", "node_modules/rxjs/src/internal/operators/merge.ts", "node_modules/rxjs/src/internal/operators/mergeWith.ts", "node_modules/rxjs/src/internal/operators/repeat.ts", "node_modules/rxjs/src/internal/operators/scan.ts", "node_modules/rxjs/src/internal/operators/share.ts", "node_modules/rxjs/src/internal/operators/shareReplay.ts", "node_modules/rxjs/src/internal/operators/skip.ts", "node_modules/rxjs/src/internal/operators/skipUntil.ts", "node_modules/rxjs/src/internal/operators/startWith.ts", "node_modules/rxjs/src/internal/operators/switchMap.ts", "node_modules/rxjs/src/internal/operators/takeUntil.ts", "node_modules/rxjs/src/internal/operators/takeWhile.ts", "node_modules/rxjs/src/internal/operators/tap.ts", "node_modules/rxjs/src/internal/operators/throttle.ts", "node_modules/rxjs/src/internal/operators/throttleTime.ts", "node_modules/rxjs/src/internal/operators/withLatestFrom.ts", "node_modules/rxjs/src/internal/operators/zip.ts", "node_modules/rxjs/src/internal/operators/zipWith.ts", "src/templates/assets/javascripts/browser/document/index.ts", "src/templates/assets/javascripts/browser/element/_/index.ts", "src/templates/assets/javascripts/browser/element/focus/index.ts", "src/templates/assets/javascripts/browser/element/hover/index.ts", "src/templates/assets/javascripts/utilities/h/index.ts", "src/templates/assets/javascripts/utilities/round/index.ts", "src/templates/assets/javascripts/browser/script/index.ts", "src/templates/assets/javascripts/browser/element/size/_/index.ts", "src/templates/assets/javascripts/browser/element/size/content/index.ts", "src/templates/assets/javascripts/browser/element/offset/_/index.ts", 
"src/templates/assets/javascripts/browser/element/offset/content/index.ts", "src/templates/assets/javascripts/browser/element/visibility/index.ts", "src/templates/assets/javascripts/browser/toggle/index.ts", "src/templates/assets/javascripts/browser/keyboard/index.ts", "src/templates/assets/javascripts/browser/location/_/index.ts", "src/templates/assets/javascripts/browser/location/hash/index.ts", "src/templates/assets/javascripts/browser/media/index.ts", "src/templates/assets/javascripts/browser/request/index.ts", "src/templates/assets/javascripts/browser/viewport/offset/index.ts", "src/templates/assets/javascripts/browser/viewport/size/index.ts", "src/templates/assets/javascripts/browser/viewport/_/index.ts", "src/templates/assets/javascripts/browser/viewport/at/index.ts", "src/templates/assets/javascripts/browser/worker/index.ts", "src/templates/assets/javascripts/_/index.ts", "src/templates/assets/javascripts/components/_/index.ts", "src/templates/assets/javascripts/components/announce/index.ts", "src/templates/assets/javascripts/components/consent/index.ts", "src/templates/assets/javascripts/templates/tooltip/index.tsx", "src/templates/assets/javascripts/templates/annotation/index.tsx", "src/templates/assets/javascripts/templates/clipboard/index.tsx", "src/templates/assets/javascripts/templates/search/index.tsx", "src/templates/assets/javascripts/templates/source/index.tsx", "src/templates/assets/javascripts/templates/tabbed/index.tsx", "src/templates/assets/javascripts/templates/table/index.tsx", "src/templates/assets/javascripts/templates/version/index.tsx", "src/templates/assets/javascripts/components/tooltip2/index.ts", "src/templates/assets/javascripts/components/content/annotation/_/index.ts", "src/templates/assets/javascripts/components/content/annotation/list/index.ts", "src/templates/assets/javascripts/components/content/annotation/block/index.ts", "src/templates/assets/javascripts/components/content/code/_/index.ts", "src/templates/assets/javascripts/components/content/details/index.ts", "src/templates/assets/javascripts/components/content/mermaid/index.css", "src/templates/assets/javascripts/components/content/mermaid/index.ts", "src/templates/assets/javascripts/components/content/table/index.ts", "src/templates/assets/javascripts/components/content/tabs/index.ts", "src/templates/assets/javascripts/components/content/_/index.ts", "src/templates/assets/javascripts/components/dialog/index.ts", "src/templates/assets/javascripts/components/tooltip/index.ts", "src/templates/assets/javascripts/components/header/_/index.ts", "src/templates/assets/javascripts/components/header/title/index.ts", "src/templates/assets/javascripts/components/main/index.ts", "src/templates/assets/javascripts/components/palette/index.ts", "src/templates/assets/javascripts/components/progress/index.ts", "src/templates/assets/javascripts/integrations/clipboard/index.ts", "src/templates/assets/javascripts/integrations/sitemap/index.ts", "src/templates/assets/javascripts/integrations/instant/index.ts", "src/templates/assets/javascripts/integrations/search/highlighter/index.ts", "src/templates/assets/javascripts/integrations/search/worker/message/index.ts", "src/templates/assets/javascripts/integrations/search/worker/_/index.ts", "src/templates/assets/javascripts/integrations/version/index.ts", "src/templates/assets/javascripts/components/search/query/index.ts", "src/templates/assets/javascripts/components/search/result/index.ts", "src/templates/assets/javascripts/components/search/share/index.ts", 
"src/templates/assets/javascripts/components/search/suggest/index.ts", "src/templates/assets/javascripts/components/search/_/index.ts", "src/templates/assets/javascripts/components/search/highlight/index.ts", "src/templates/assets/javascripts/components/sidebar/index.ts", "src/templates/assets/javascripts/components/source/facts/github/index.ts", "src/templates/assets/javascripts/components/source/facts/gitlab/index.ts", "src/templates/assets/javascripts/components/source/facts/_/index.ts", "src/templates/assets/javascripts/components/source/_/index.ts", "src/templates/assets/javascripts/components/tabs/index.ts", "src/templates/assets/javascripts/components/toc/index.ts", "src/templates/assets/javascripts/components/top/index.ts", "src/templates/assets/javascripts/patches/ellipsis/index.ts", "src/templates/assets/javascripts/patches/indeterminate/index.ts", "src/templates/assets/javascripts/patches/scrollfix/index.ts", "src/templates/assets/javascripts/patches/scrolllock/index.ts", "src/templates/assets/javascripts/polyfills/index.ts"], + "sourcesContent": ["(function (global, factory) {\n typeof exports === 'object' && typeof module !== 'undefined' ? factory() :\n typeof define === 'function' && define.amd ? define(factory) :\n (factory());\n}(this, (function () { 'use strict';\n\n /**\n * Applies the :focus-visible polyfill at the given scope.\n * A scope in this case is either the top-level Document or a Shadow Root.\n *\n * @param {(Document|ShadowRoot)} scope\n * @see /~https://github.com/WICG/focus-visible\n */\n function applyFocusVisiblePolyfill(scope) {\n var hadKeyboardEvent = true;\n var hadFocusVisibleRecently = false;\n var hadFocusVisibleRecentlyTimeout = null;\n\n var inputTypesAllowlist = {\n text: true,\n search: true,\n url: true,\n tel: true,\n email: true,\n password: true,\n number: true,\n date: true,\n month: true,\n week: true,\n time: true,\n datetime: true,\n 'datetime-local': true\n };\n\n /**\n * Helper function for legacy browsers and iframes which sometimes focus\n * elements like document, body, and non-interactive SVG.\n * @param {Element} el\n */\n function isValidFocusTarget(el) {\n if (\n el &&\n el !== document &&\n el.nodeName !== 'HTML' &&\n el.nodeName !== 'BODY' &&\n 'classList' in el &&\n 'contains' in el.classList\n ) {\n return true;\n }\n return false;\n }\n\n /**\n * Computes whether the given element should automatically trigger the\n * `focus-visible` class being added, i.e. 
whether it should always match\n * `:focus-visible` when focused.\n * @param {Element} el\n * @return {boolean}\n */\n function focusTriggersKeyboardModality(el) {\n var type = el.type;\n var tagName = el.tagName;\n\n if (tagName === 'INPUT' && inputTypesAllowlist[type] && !el.readOnly) {\n return true;\n }\n\n if (tagName === 'TEXTAREA' && !el.readOnly) {\n return true;\n }\n\n if (el.isContentEditable) {\n return true;\n }\n\n return false;\n }\n\n /**\n * Add the `focus-visible` class to the given element if it was not added by\n * the author.\n * @param {Element} el\n */\n function addFocusVisibleClass(el) {\n if (el.classList.contains('focus-visible')) {\n return;\n }\n el.classList.add('focus-visible');\n el.setAttribute('data-focus-visible-added', '');\n }\n\n /**\n * Remove the `focus-visible` class from the given element if it was not\n * originally added by the author.\n * @param {Element} el\n */\n function removeFocusVisibleClass(el) {\n if (!el.hasAttribute('data-focus-visible-added')) {\n return;\n }\n el.classList.remove('focus-visible');\n el.removeAttribute('data-focus-visible-added');\n }\n\n /**\n * If the most recent user interaction was via the keyboard;\n * and the key press did not include a meta, alt/option, or control key;\n * then the modality is keyboard. Otherwise, the modality is not keyboard.\n * Apply `focus-visible` to any current active element and keep track\n * of our keyboard modality state with `hadKeyboardEvent`.\n * @param {KeyboardEvent} e\n */\n function onKeyDown(e) {\n if (e.metaKey || e.altKey || e.ctrlKey) {\n return;\n }\n\n if (isValidFocusTarget(scope.activeElement)) {\n addFocusVisibleClass(scope.activeElement);\n }\n\n hadKeyboardEvent = true;\n }\n\n /**\n * If at any point a user clicks with a pointing device, ensure that we change\n * the modality away from keyboard.\n * This avoids the situation where a user presses a key on an already focused\n * element, and then clicks on a different element, focusing it with a\n * pointing device, while we still think we're in keyboard modality.\n * @param {Event} e\n */\n function onPointerDown(e) {\n hadKeyboardEvent = false;\n }\n\n /**\n * On `focus`, add the `focus-visible` class to the target if:\n * - the target received focus as a result of keyboard navigation, or\n * - the event target is an element that will likely require interaction\n * via the keyboard (e.g. 
a text box)\n * @param {Event} e\n */\n function onFocus(e) {\n // Prevent IE from focusing the document or HTML element.\n if (!isValidFocusTarget(e.target)) {\n return;\n }\n\n if (hadKeyboardEvent || focusTriggersKeyboardModality(e.target)) {\n addFocusVisibleClass(e.target);\n }\n }\n\n /**\n * On `blur`, remove the `focus-visible` class from the target.\n * @param {Event} e\n */\n function onBlur(e) {\n if (!isValidFocusTarget(e.target)) {\n return;\n }\n\n if (\n e.target.classList.contains('focus-visible') ||\n e.target.hasAttribute('data-focus-visible-added')\n ) {\n // To detect a tab/window switch, we look for a blur event followed\n // rapidly by a visibility change.\n // If we don't see a visibility change within 100ms, it's probably a\n // regular focus change.\n hadFocusVisibleRecently = true;\n window.clearTimeout(hadFocusVisibleRecentlyTimeout);\n hadFocusVisibleRecentlyTimeout = window.setTimeout(function() {\n hadFocusVisibleRecently = false;\n }, 100);\n removeFocusVisibleClass(e.target);\n }\n }\n\n /**\n * If the user changes tabs, keep track of whether or not the previously\n * focused element had .focus-visible.\n * @param {Event} e\n */\n function onVisibilityChange(e) {\n if (document.visibilityState === 'hidden') {\n // If the tab becomes active again, the browser will handle calling focus\n // on the element (Safari actually calls it twice).\n // If this tab change caused a blur on an element with focus-visible,\n // re-apply the class when the user switches back to the tab.\n if (hadFocusVisibleRecently) {\n hadKeyboardEvent = true;\n }\n addInitialPointerMoveListeners();\n }\n }\n\n /**\n * Add a group of listeners to detect usage of any pointing devices.\n * These listeners will be added when the polyfill first loads, and anytime\n * the window is blurred, so that they are active when the window regains\n * focus.\n */\n function addInitialPointerMoveListeners() {\n document.addEventListener('mousemove', onInitialPointerMove);\n document.addEventListener('mousedown', onInitialPointerMove);\n document.addEventListener('mouseup', onInitialPointerMove);\n document.addEventListener('pointermove', onInitialPointerMove);\n document.addEventListener('pointerdown', onInitialPointerMove);\n document.addEventListener('pointerup', onInitialPointerMove);\n document.addEventListener('touchmove', onInitialPointerMove);\n document.addEventListener('touchstart', onInitialPointerMove);\n document.addEventListener('touchend', onInitialPointerMove);\n }\n\n function removeInitialPointerMoveListeners() {\n document.removeEventListener('mousemove', onInitialPointerMove);\n document.removeEventListener('mousedown', onInitialPointerMove);\n document.removeEventListener('mouseup', onInitialPointerMove);\n document.removeEventListener('pointermove', onInitialPointerMove);\n document.removeEventListener('pointerdown', onInitialPointerMove);\n document.removeEventListener('pointerup', onInitialPointerMove);\n document.removeEventListener('touchmove', onInitialPointerMove);\n document.removeEventListener('touchstart', onInitialPointerMove);\n document.removeEventListener('touchend', onInitialPointerMove);\n }\n\n /**\n * When the polfyill first loads, assume the user is in keyboard modality.\n * If any event is received from a pointing device (e.g. 
mouse, pointer,\n * touch), turn off keyboard modality.\n * This accounts for situations where focus enters the page from the URL bar.\n * @param {Event} e\n */\n function onInitialPointerMove(e) {\n // Work around a Safari quirk that fires a mousemove on whenever the\n // window blurs, even if you're tabbing out of the page. \u00AF\\_(\u30C4)_/\u00AF\n if (e.target.nodeName && e.target.nodeName.toLowerCase() === 'html') {\n return;\n }\n\n hadKeyboardEvent = false;\n removeInitialPointerMoveListeners();\n }\n\n // For some kinds of state, we are interested in changes at the global scope\n // only. For example, global pointer input, global key presses and global\n // visibility change should affect the state at every scope:\n document.addEventListener('keydown', onKeyDown, true);\n document.addEventListener('mousedown', onPointerDown, true);\n document.addEventListener('pointerdown', onPointerDown, true);\n document.addEventListener('touchstart', onPointerDown, true);\n document.addEventListener('visibilitychange', onVisibilityChange, true);\n\n addInitialPointerMoveListeners();\n\n // For focus and blur, we specifically care about state changes in the local\n // scope. This is because focus / blur events that originate from within a\n // shadow root are not re-dispatched from the host element if it was already\n // the active element in its own scope:\n scope.addEventListener('focus', onFocus, true);\n scope.addEventListener('blur', onBlur, true);\n\n // We detect that a node is a ShadowRoot by ensuring that it is a\n // DocumentFragment and also has a host property. This check covers native\n // implementation and polyfill implementation transparently. If we only cared\n // about the native implementation, we could just check if the scope was\n // an instance of a ShadowRoot.\n if (scope.nodeType === Node.DOCUMENT_FRAGMENT_NODE && scope.host) {\n // Since a ShadowRoot is a special kind of DocumentFragment, it does not\n // have a root element to add a class to. So, we add this attribute to the\n // host element instead:\n scope.host.setAttribute('data-js-focus-visible', '');\n } else if (scope.nodeType === Node.DOCUMENT_NODE) {\n document.documentElement.classList.add('js-focus-visible');\n document.documentElement.setAttribute('data-js-focus-visible', '');\n }\n }\n\n // It is important to wrap all references to global window and document in\n // these checks to support server-side rendering use cases\n // @see /~https://github.com/WICG/focus-visible/issues/199\n if (typeof window !== 'undefined' && typeof document !== 'undefined') {\n // Make the polyfill helper globally available. 
This can be used as a signal\n // to interested libraries that wish to coordinate with the polyfill for e.g.,\n // applying the polyfill to a shadow root:\n window.applyFocusVisiblePolyfill = applyFocusVisiblePolyfill;\n\n // Notify interested libraries of the polyfill's presence, in case the\n // polyfill was loaded lazily:\n var event;\n\n try {\n event = new CustomEvent('focus-visible-polyfill-ready');\n } catch (error) {\n // IE11 does not support using CustomEvent as a constructor directly:\n event = document.createEvent('CustomEvent');\n event.initCustomEvent('focus-visible-polyfill-ready', false, false, {});\n }\n\n window.dispatchEvent(event);\n }\n\n if (typeof document !== 'undefined') {\n // Apply the polyfill to the global document, so that no JavaScript\n // coordination is required to use the polyfill in the top-level document:\n applyFocusVisiblePolyfill(document);\n }\n\n})));\n", "/*!\n * clipboard.js v2.0.11\n * https://clipboardjs.com/\n *\n * Licensed MIT \u00A9 Zeno Rocha\n */\n(function webpackUniversalModuleDefinition(root, factory) {\n\tif(typeof exports === 'object' && typeof module === 'object')\n\t\tmodule.exports = factory();\n\telse if(typeof define === 'function' && define.amd)\n\t\tdefine([], factory);\n\telse if(typeof exports === 'object')\n\t\texports[\"ClipboardJS\"] = factory();\n\telse\n\t\troot[\"ClipboardJS\"] = factory();\n})(this, function() {\nreturn /******/ (function() { // webpackBootstrap\n/******/ \tvar __webpack_modules__ = ({\n\n/***/ 686:\n/***/ (function(__unused_webpack_module, __webpack_exports__, __webpack_require__) {\n\n\"use strict\";\n\n// EXPORTS\n__webpack_require__.d(__webpack_exports__, {\n \"default\": function() { return /* binding */ clipboard; }\n});\n\n// EXTERNAL MODULE: ./node_modules/tiny-emitter/index.js\nvar tiny_emitter = __webpack_require__(279);\nvar tiny_emitter_default = /*#__PURE__*/__webpack_require__.n(tiny_emitter);\n// EXTERNAL MODULE: ./node_modules/good-listener/src/listen.js\nvar listen = __webpack_require__(370);\nvar listen_default = /*#__PURE__*/__webpack_require__.n(listen);\n// EXTERNAL MODULE: ./node_modules/select/src/select.js\nvar src_select = __webpack_require__(817);\nvar select_default = /*#__PURE__*/__webpack_require__.n(src_select);\n;// CONCATENATED MODULE: ./src/common/command.js\n/**\n * Executes a given operation type.\n * @param {String} type\n * @return {Boolean}\n */\nfunction command(type) {\n try {\n return document.execCommand(type);\n } catch (err) {\n return false;\n }\n}\n;// CONCATENATED MODULE: ./src/actions/cut.js\n\n\n/**\n * Cut action wrapper.\n * @param {String|HTMLElement} target\n * @return {String}\n */\n\nvar ClipboardActionCut = function ClipboardActionCut(target) {\n var selectedText = select_default()(target);\n command('cut');\n return selectedText;\n};\n\n/* harmony default export */ var actions_cut = (ClipboardActionCut);\n;// CONCATENATED MODULE: ./src/common/create-fake-element.js\n/**\n * Creates a fake textarea element with a value.\n * @param {String} value\n * @return {HTMLElement}\n */\nfunction createFakeElement(value) {\n var isRTL = document.documentElement.getAttribute('dir') === 'rtl';\n var fakeElement = document.createElement('textarea'); // Prevent zooming on iOS\n\n fakeElement.style.fontSize = '12pt'; // Reset box model\n\n fakeElement.style.border = '0';\n fakeElement.style.padding = '0';\n fakeElement.style.margin = '0'; // Move element out of screen horizontally\n\n fakeElement.style.position = 'absolute';\n fakeElement.style[isRTL ? 
'right' : 'left'] = '-9999px'; // Move element to the same position vertically\n\n var yPosition = window.pageYOffset || document.documentElement.scrollTop;\n fakeElement.style.top = \"\".concat(yPosition, \"px\");\n fakeElement.setAttribute('readonly', '');\n fakeElement.value = value;\n return fakeElement;\n}\n;// CONCATENATED MODULE: ./src/actions/copy.js\n\n\n\n/**\n * Create fake copy action wrapper using a fake element.\n * @param {String} target\n * @param {Object} options\n * @return {String}\n */\n\nvar fakeCopyAction = function fakeCopyAction(value, options) {\n var fakeElement = createFakeElement(value);\n options.container.appendChild(fakeElement);\n var selectedText = select_default()(fakeElement);\n command('copy');\n fakeElement.remove();\n return selectedText;\n};\n/**\n * Copy action wrapper.\n * @param {String|HTMLElement} target\n * @param {Object} options\n * @return {String}\n */\n\n\nvar ClipboardActionCopy = function ClipboardActionCopy(target) {\n var options = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : {\n container: document.body\n };\n var selectedText = '';\n\n if (typeof target === 'string') {\n selectedText = fakeCopyAction(target, options);\n } else if (target instanceof HTMLInputElement && !['text', 'search', 'url', 'tel', 'password'].includes(target === null || target === void 0 ? void 0 : target.type)) {\n // If input type doesn't support `setSelectionRange`. Simulate it. https://developer.mozilla.org/en-US/docs/Web/API/HTMLInputElement/setSelectionRange\n selectedText = fakeCopyAction(target.value, options);\n } else {\n selectedText = select_default()(target);\n command('copy');\n }\n\n return selectedText;\n};\n\n/* harmony default export */ var actions_copy = (ClipboardActionCopy);\n;// CONCATENATED MODULE: ./src/actions/default.js\nfunction _typeof(obj) { \"@babel/helpers - typeof\"; if (typeof Symbol === \"function\" && typeof Symbol.iterator === \"symbol\") { _typeof = function _typeof(obj) { return typeof obj; }; } else { _typeof = function _typeof(obj) { return obj && typeof Symbol === \"function\" && obj.constructor === Symbol && obj !== Symbol.prototype ? \"symbol\" : typeof obj; }; } return _typeof(obj); }\n\n\n\n/**\n * Inner function which performs selection from either `text` or `target`\n * properties and then executes copy or cut operations.\n * @param {Object} options\n */\n\nvar ClipboardActionDefault = function ClipboardActionDefault() {\n var options = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : {};\n // Defines base properties passed from constructor.\n var _options$action = options.action,\n action = _options$action === void 0 ? 'copy' : _options$action,\n container = options.container,\n target = options.target,\n text = options.text; // Sets the `action` to be performed which can be either 'copy' or 'cut'.\n\n if (action !== 'copy' && action !== 'cut') {\n throw new Error('Invalid \"action\" value, use either \"copy\" or \"cut\"');\n } // Sets the `target` property using an element that will be have its content copied.\n\n\n if (target !== undefined) {\n if (target && _typeof(target) === 'object' && target.nodeType === 1) {\n if (action === 'copy' && target.hasAttribute('disabled')) {\n throw new Error('Invalid \"target\" attribute. Please use \"readonly\" instead of \"disabled\" attribute');\n }\n\n if (action === 'cut' && (target.hasAttribute('readonly') || target.hasAttribute('disabled'))) {\n throw new Error('Invalid \"target\" attribute. 
You can\\'t cut text from elements with \"readonly\" or \"disabled\" attributes');\n }\n } else {\n throw new Error('Invalid \"target\" value, use a valid Element');\n }\n } // Define selection strategy based on `text` property.\n\n\n if (text) {\n return actions_copy(text, {\n container: container\n });\n } // Defines which selection strategy based on `target` property.\n\n\n if (target) {\n return action === 'cut' ? actions_cut(target) : actions_copy(target, {\n container: container\n });\n }\n};\n\n/* harmony default export */ var actions_default = (ClipboardActionDefault);\n;// CONCATENATED MODULE: ./src/clipboard.js\nfunction clipboard_typeof(obj) { \"@babel/helpers - typeof\"; if (typeof Symbol === \"function\" && typeof Symbol.iterator === \"symbol\") { clipboard_typeof = function _typeof(obj) { return typeof obj; }; } else { clipboard_typeof = function _typeof(obj) { return obj && typeof Symbol === \"function\" && obj.constructor === Symbol && obj !== Symbol.prototype ? \"symbol\" : typeof obj; }; } return clipboard_typeof(obj); }\n\nfunction _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError(\"Cannot call a class as a function\"); } }\n\nfunction _defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if (\"value\" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } }\n\nfunction _createClass(Constructor, protoProps, staticProps) { if (protoProps) _defineProperties(Constructor.prototype, protoProps); if (staticProps) _defineProperties(Constructor, staticProps); return Constructor; }\n\nfunction _inherits(subClass, superClass) { if (typeof superClass !== \"function\" && superClass !== null) { throw new TypeError(\"Super expression must either be null or a function\"); } subClass.prototype = Object.create(superClass && superClass.prototype, { constructor: { value: subClass, writable: true, configurable: true } }); if (superClass) _setPrototypeOf(subClass, superClass); }\n\nfunction _setPrototypeOf(o, p) { _setPrototypeOf = Object.setPrototypeOf || function _setPrototypeOf(o, p) { o.__proto__ = p; return o; }; return _setPrototypeOf(o, p); }\n\nfunction _createSuper(Derived) { var hasNativeReflectConstruct = _isNativeReflectConstruct(); return function _createSuperInternal() { var Super = _getPrototypeOf(Derived), result; if (hasNativeReflectConstruct) { var NewTarget = _getPrototypeOf(this).constructor; result = Reflect.construct(Super, arguments, NewTarget); } else { result = Super.apply(this, arguments); } return _possibleConstructorReturn(this, result); }; }\n\nfunction _possibleConstructorReturn(self, call) { if (call && (clipboard_typeof(call) === \"object\" || typeof call === \"function\")) { return call; } return _assertThisInitialized(self); }\n\nfunction _assertThisInitialized(self) { if (self === void 0) { throw new ReferenceError(\"this hasn't been initialised - super() hasn't been called\"); } return self; }\n\nfunction _isNativeReflectConstruct() { if (typeof Reflect === \"undefined\" || !Reflect.construct) return false; if (Reflect.construct.sham) return false; if (typeof Proxy === \"function\") return true; try { Date.prototype.toString.call(Reflect.construct(Date, [], function () {})); return true; } catch (e) { return false; } }\n\nfunction _getPrototypeOf(o) { _getPrototypeOf = Object.setPrototypeOf ? 
Object.getPrototypeOf : function _getPrototypeOf(o) { return o.__proto__ || Object.getPrototypeOf(o); }; return _getPrototypeOf(o); }\n\n\n\n\n\n\n/**\n * Helper function to retrieve attribute value.\n * @param {String} suffix\n * @param {Element} element\n */\n\nfunction getAttributeValue(suffix, element) {\n var attribute = \"data-clipboard-\".concat(suffix);\n\n if (!element.hasAttribute(attribute)) {\n return;\n }\n\n return element.getAttribute(attribute);\n}\n/**\n * Base class which takes one or more elements, adds event listeners to them,\n * and instantiates a new `ClipboardAction` on each click.\n */\n\n\nvar Clipboard = /*#__PURE__*/function (_Emitter) {\n _inherits(Clipboard, _Emitter);\n\n var _super = _createSuper(Clipboard);\n\n /**\n * @param {String|HTMLElement|HTMLCollection|NodeList} trigger\n * @param {Object} options\n */\n function Clipboard(trigger, options) {\n var _this;\n\n _classCallCheck(this, Clipboard);\n\n _this = _super.call(this);\n\n _this.resolveOptions(options);\n\n _this.listenClick(trigger);\n\n return _this;\n }\n /**\n * Defines if attributes would be resolved using internal setter functions\n * or custom functions that were passed in the constructor.\n * @param {Object} options\n */\n\n\n _createClass(Clipboard, [{\n key: \"resolveOptions\",\n value: function resolveOptions() {\n var options = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : {};\n this.action = typeof options.action === 'function' ? options.action : this.defaultAction;\n this.target = typeof options.target === 'function' ? options.target : this.defaultTarget;\n this.text = typeof options.text === 'function' ? options.text : this.defaultText;\n this.container = clipboard_typeof(options.container) === 'object' ? options.container : document.body;\n }\n /**\n * Adds a click event listener to the passed trigger.\n * @param {String|HTMLElement|HTMLCollection|NodeList} trigger\n */\n\n }, {\n key: \"listenClick\",\n value: function listenClick(trigger) {\n var _this2 = this;\n\n this.listener = listen_default()(trigger, 'click', function (e) {\n return _this2.onClick(e);\n });\n }\n /**\n * Defines a new `ClipboardAction` on each click event.\n * @param {Event} e\n */\n\n }, {\n key: \"onClick\",\n value: function onClick(e) {\n var trigger = e.delegateTarget || e.currentTarget;\n var action = this.action(trigger) || 'copy';\n var text = actions_default({\n action: action,\n container: this.container,\n target: this.target(trigger),\n text: this.text(trigger)\n }); // Fires an event based on the copy operation result.\n\n this.emit(text ? 
'success' : 'error', {\n action: action,\n text: text,\n trigger: trigger,\n clearSelection: function clearSelection() {\n if (trigger) {\n trigger.focus();\n }\n\n window.getSelection().removeAllRanges();\n }\n });\n }\n /**\n * Default `action` lookup function.\n * @param {Element} trigger\n */\n\n }, {\n key: \"defaultAction\",\n value: function defaultAction(trigger) {\n return getAttributeValue('action', trigger);\n }\n /**\n * Default `target` lookup function.\n * @param {Element} trigger\n */\n\n }, {\n key: \"defaultTarget\",\n value: function defaultTarget(trigger) {\n var selector = getAttributeValue('target', trigger);\n\n if (selector) {\n return document.querySelector(selector);\n }\n }\n /**\n * Allow fire programmatically a copy action\n * @param {String|HTMLElement} target\n * @param {Object} options\n * @returns Text copied.\n */\n\n }, {\n key: \"defaultText\",\n\n /**\n * Default `text` lookup function.\n * @param {Element} trigger\n */\n value: function defaultText(trigger) {\n return getAttributeValue('text', trigger);\n }\n /**\n * Destroy lifecycle.\n */\n\n }, {\n key: \"destroy\",\n value: function destroy() {\n this.listener.destroy();\n }\n }], [{\n key: \"copy\",\n value: function copy(target) {\n var options = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : {\n container: document.body\n };\n return actions_copy(target, options);\n }\n /**\n * Allow fire programmatically a cut action\n * @param {String|HTMLElement} target\n * @returns Text cutted.\n */\n\n }, {\n key: \"cut\",\n value: function cut(target) {\n return actions_cut(target);\n }\n /**\n * Returns the support of the given action, or all actions if no action is\n * given.\n * @param {String} [action]\n */\n\n }, {\n key: \"isSupported\",\n value: function isSupported() {\n var action = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : ['copy', 'cut'];\n var actions = typeof action === 'string' ? 
[action] : action;\n var support = !!document.queryCommandSupported;\n actions.forEach(function (action) {\n support = support && !!document.queryCommandSupported(action);\n });\n return support;\n }\n }]);\n\n return Clipboard;\n}((tiny_emitter_default()));\n\n/* harmony default export */ var clipboard = (Clipboard);\n\n/***/ }),\n\n/***/ 828:\n/***/ (function(module) {\n\nvar DOCUMENT_NODE_TYPE = 9;\n\n/**\n * A polyfill for Element.matches()\n */\nif (typeof Element !== 'undefined' && !Element.prototype.matches) {\n var proto = Element.prototype;\n\n proto.matches = proto.matchesSelector ||\n proto.mozMatchesSelector ||\n proto.msMatchesSelector ||\n proto.oMatchesSelector ||\n proto.webkitMatchesSelector;\n}\n\n/**\n * Finds the closest parent that matches a selector.\n *\n * @param {Element} element\n * @param {String} selector\n * @return {Function}\n */\nfunction closest (element, selector) {\n while (element && element.nodeType !== DOCUMENT_NODE_TYPE) {\n if (typeof element.matches === 'function' &&\n element.matches(selector)) {\n return element;\n }\n element = element.parentNode;\n }\n}\n\nmodule.exports = closest;\n\n\n/***/ }),\n\n/***/ 438:\n/***/ (function(module, __unused_webpack_exports, __webpack_require__) {\n\nvar closest = __webpack_require__(828);\n\n/**\n * Delegates event to a selector.\n *\n * @param {Element} element\n * @param {String} selector\n * @param {String} type\n * @param {Function} callback\n * @param {Boolean} useCapture\n * @return {Object}\n */\nfunction _delegate(element, selector, type, callback, useCapture) {\n var listenerFn = listener.apply(this, arguments);\n\n element.addEventListener(type, listenerFn, useCapture);\n\n return {\n destroy: function() {\n element.removeEventListener(type, listenerFn, useCapture);\n }\n }\n}\n\n/**\n * Delegates event to a selector.\n *\n * @param {Element|String|Array} [elements]\n * @param {String} selector\n * @param {String} type\n * @param {Function} callback\n * @param {Boolean} useCapture\n * @return {Object}\n */\nfunction delegate(elements, selector, type, callback, useCapture) {\n // Handle the regular Element usage\n if (typeof elements.addEventListener === 'function') {\n return _delegate.apply(null, arguments);\n }\n\n // Handle Element-less usage, it defaults to global delegation\n if (typeof type === 'function') {\n // Use `document` as the first parameter, then apply arguments\n // This is a short way to .unshift `arguments` without running into deoptimizations\n return _delegate.bind(null, document).apply(null, arguments);\n }\n\n // Handle Selector-based usage\n if (typeof elements === 'string') {\n elements = document.querySelectorAll(elements);\n }\n\n // Handle Array-like based usage\n return Array.prototype.map.call(elements, function (element) {\n return _delegate(element, selector, type, callback, useCapture);\n });\n}\n\n/**\n * Finds closest match and invokes callback.\n *\n * @param {Element} element\n * @param {String} selector\n * @param {String} type\n * @param {Function} callback\n * @return {Function}\n */\nfunction listener(element, selector, type, callback) {\n return function(e) {\n e.delegateTarget = closest(e.target, selector);\n\n if (e.delegateTarget) {\n callback.call(element, e);\n }\n }\n}\n\nmodule.exports = delegate;\n\n\n/***/ }),\n\n/***/ 879:\n/***/ (function(__unused_webpack_module, exports) {\n\n/**\n * Check if argument is a HTML element.\n *\n * @param {Object} value\n * @return {Boolean}\n */\nexports.node = function(value) {\n return value !== undefined\n && 
value instanceof HTMLElement\n && value.nodeType === 1;\n};\n\n/**\n * Check if argument is a list of HTML elements.\n *\n * @param {Object} value\n * @return {Boolean}\n */\nexports.nodeList = function(value) {\n var type = Object.prototype.toString.call(value);\n\n return value !== undefined\n && (type === '[object NodeList]' || type === '[object HTMLCollection]')\n && ('length' in value)\n && (value.length === 0 || exports.node(value[0]));\n};\n\n/**\n * Check if argument is a string.\n *\n * @param {Object} value\n * @return {Boolean}\n */\nexports.string = function(value) {\n return typeof value === 'string'\n || value instanceof String;\n};\n\n/**\n * Check if argument is a function.\n *\n * @param {Object} value\n * @return {Boolean}\n */\nexports.fn = function(value) {\n var type = Object.prototype.toString.call(value);\n\n return type === '[object Function]';\n};\n\n\n/***/ }),\n\n/***/ 370:\n/***/ (function(module, __unused_webpack_exports, __webpack_require__) {\n\nvar is = __webpack_require__(879);\nvar delegate = __webpack_require__(438);\n\n/**\n * Validates all params and calls the right\n * listener function based on its target type.\n *\n * @param {String|HTMLElement|HTMLCollection|NodeList} target\n * @param {String} type\n * @param {Function} callback\n * @return {Object}\n */\nfunction listen(target, type, callback) {\n if (!target && !type && !callback) {\n throw new Error('Missing required arguments');\n }\n\n if (!is.string(type)) {\n throw new TypeError('Second argument must be a String');\n }\n\n if (!is.fn(callback)) {\n throw new TypeError('Third argument must be a Function');\n }\n\n if (is.node(target)) {\n return listenNode(target, type, callback);\n }\n else if (is.nodeList(target)) {\n return listenNodeList(target, type, callback);\n }\n else if (is.string(target)) {\n return listenSelector(target, type, callback);\n }\n else {\n throw new TypeError('First argument must be a String, HTMLElement, HTMLCollection, or NodeList');\n }\n}\n\n/**\n * Adds an event listener to a HTML element\n * and returns a remove listener function.\n *\n * @param {HTMLElement} node\n * @param {String} type\n * @param {Function} callback\n * @return {Object}\n */\nfunction listenNode(node, type, callback) {\n node.addEventListener(type, callback);\n\n return {\n destroy: function() {\n node.removeEventListener(type, callback);\n }\n }\n}\n\n/**\n * Add an event listener to a list of HTML elements\n * and returns a remove listener function.\n *\n * @param {NodeList|HTMLCollection} nodeList\n * @param {String} type\n * @param {Function} callback\n * @return {Object}\n */\nfunction listenNodeList(nodeList, type, callback) {\n Array.prototype.forEach.call(nodeList, function(node) {\n node.addEventListener(type, callback);\n });\n\n return {\n destroy: function() {\n Array.prototype.forEach.call(nodeList, function(node) {\n node.removeEventListener(type, callback);\n });\n }\n }\n}\n\n/**\n * Add an event listener to a selector\n * and returns a remove listener function.\n *\n * @param {String} selector\n * @param {String} type\n * @param {Function} callback\n * @return {Object}\n */\nfunction listenSelector(selector, type, callback) {\n return delegate(document.body, selector, type, callback);\n}\n\nmodule.exports = listen;\n\n\n/***/ }),\n\n/***/ 817:\n/***/ (function(module) {\n\nfunction select(element) {\n var selectedText;\n\n if (element.nodeName === 'SELECT') {\n element.focus();\n\n selectedText = element.value;\n }\n else if (element.nodeName === 'INPUT' || element.nodeName 
=== 'TEXTAREA') {\n var isReadOnly = element.hasAttribute('readonly');\n\n if (!isReadOnly) {\n element.setAttribute('readonly', '');\n }\n\n element.select();\n element.setSelectionRange(0, element.value.length);\n\n if (!isReadOnly) {\n element.removeAttribute('readonly');\n }\n\n selectedText = element.value;\n }\n else {\n if (element.hasAttribute('contenteditable')) {\n element.focus();\n }\n\n var selection = window.getSelection();\n var range = document.createRange();\n\n range.selectNodeContents(element);\n selection.removeAllRanges();\n selection.addRange(range);\n\n selectedText = selection.toString();\n }\n\n return selectedText;\n}\n\nmodule.exports = select;\n\n\n/***/ }),\n\n/***/ 279:\n/***/ (function(module) {\n\nfunction E () {\n // Keep this empty so it's easier to inherit from\n // (via /~https://github.com/lipsmack from /~https://github.com/scottcorgan/tiny-emitter/issues/3)\n}\n\nE.prototype = {\n on: function (name, callback, ctx) {\n var e = this.e || (this.e = {});\n\n (e[name] || (e[name] = [])).push({\n fn: callback,\n ctx: ctx\n });\n\n return this;\n },\n\n once: function (name, callback, ctx) {\n var self = this;\n function listener () {\n self.off(name, listener);\n callback.apply(ctx, arguments);\n };\n\n listener._ = callback\n return this.on(name, listener, ctx);\n },\n\n emit: function (name) {\n var data = [].slice.call(arguments, 1);\n var evtArr = ((this.e || (this.e = {}))[name] || []).slice();\n var i = 0;\n var len = evtArr.length;\n\n for (i; i < len; i++) {\n evtArr[i].fn.apply(evtArr[i].ctx, data);\n }\n\n return this;\n },\n\n off: function (name, callback) {\n var e = this.e || (this.e = {});\n var evts = e[name];\n var liveEvents = [];\n\n if (evts && callback) {\n for (var i = 0, len = evts.length; i < len; i++) {\n if (evts[i].fn !== callback && evts[i].fn._ !== callback)\n liveEvents.push(evts[i]);\n }\n }\n\n // Remove event from queue to prevent memory leak\n // Suggested by /~https://github.com/lazd\n // Ref: /~https://github.com/scottcorgan/tiny-emitter/commit/c6ebfaa9bc973b33d110a84a307742b7cf94c953#commitcomment-5024910\n\n (liveEvents.length)\n ? 
e[name] = liveEvents\n : delete e[name];\n\n return this;\n }\n};\n\nmodule.exports = E;\nmodule.exports.TinyEmitter = E;\n\n\n/***/ })\n\n/******/ \t});\n/************************************************************************/\n/******/ \t// The module cache\n/******/ \tvar __webpack_module_cache__ = {};\n/******/ \t\n/******/ \t// The require function\n/******/ \tfunction __webpack_require__(moduleId) {\n/******/ \t\t// Check if module is in cache\n/******/ \t\tif(__webpack_module_cache__[moduleId]) {\n/******/ \t\t\treturn __webpack_module_cache__[moduleId].exports;\n/******/ \t\t}\n/******/ \t\t// Create a new module (and put it into the cache)\n/******/ \t\tvar module = __webpack_module_cache__[moduleId] = {\n/******/ \t\t\t// no module.id needed\n/******/ \t\t\t// no module.loaded needed\n/******/ \t\t\texports: {}\n/******/ \t\t};\n/******/ \t\n/******/ \t\t// Execute the module function\n/******/ \t\t__webpack_modules__[moduleId](module, module.exports, __webpack_require__);\n/******/ \t\n/******/ \t\t// Return the exports of the module\n/******/ \t\treturn module.exports;\n/******/ \t}\n/******/ \t\n/************************************************************************/\n/******/ \t/* webpack/runtime/compat get default export */\n/******/ \t!function() {\n/******/ \t\t// getDefaultExport function for compatibility with non-harmony modules\n/******/ \t\t__webpack_require__.n = function(module) {\n/******/ \t\t\tvar getter = module && module.__esModule ?\n/******/ \t\t\t\tfunction() { return module['default']; } :\n/******/ \t\t\t\tfunction() { return module; };\n/******/ \t\t\t__webpack_require__.d(getter, { a: getter });\n/******/ \t\t\treturn getter;\n/******/ \t\t};\n/******/ \t}();\n/******/ \t\n/******/ \t/* webpack/runtime/define property getters */\n/******/ \t!function() {\n/******/ \t\t// define getter functions for harmony exports\n/******/ \t\t__webpack_require__.d = function(exports, definition) {\n/******/ \t\t\tfor(var key in definition) {\n/******/ \t\t\t\tif(__webpack_require__.o(definition, key) && !__webpack_require__.o(exports, key)) {\n/******/ \t\t\t\t\tObject.defineProperty(exports, key, { enumerable: true, get: definition[key] });\n/******/ \t\t\t\t}\n/******/ \t\t\t}\n/******/ \t\t};\n/******/ \t}();\n/******/ \t\n/******/ \t/* webpack/runtime/hasOwnProperty shorthand */\n/******/ \t!function() {\n/******/ \t\t__webpack_require__.o = function(obj, prop) { return Object.prototype.hasOwnProperty.call(obj, prop); }\n/******/ \t}();\n/******/ \t\n/************************************************************************/\n/******/ \t// module exports must be returned from runtime so entry inlining is disabled\n/******/ \t// startup\n/******/ \t// Load entry module and return exports\n/******/ \treturn __webpack_require__(686);\n/******/ })()\n.default;\n});", "/*!\n * escape-html\n * Copyright(c) 2012-2013 TJ Holowaychuk\n * Copyright(c) 2015 Andreas Lubbe\n * Copyright(c) 2015 Tiancheng \"Timothy\" Gu\n * MIT Licensed\n */\n\n'use strict';\n\n/**\n * Module variables.\n * @private\n */\n\nvar matchHtmlRegExp = /[\"'&<>]/;\n\n/**\n * Module exports.\n * @public\n */\n\nmodule.exports = escapeHtml;\n\n/**\n * Escape special characters in the given string of html.\n *\n * @param {string} string The string to escape for inserting into HTML\n * @return {string}\n * @public\n */\n\nfunction escapeHtml(string) {\n var str = '' + string;\n var match = matchHtmlRegExp.exec(str);\n\n if (!match) {\n return str;\n }\n\n var escape;\n var html = '';\n var index = 0;\n 
var lastIndex = 0;\n\n for (index = match.index; index < str.length; index++) {\n switch (str.charCodeAt(index)) {\n case 34: // \"\n escape = '"';\n break;\n case 38: // &\n escape = '&';\n break;\n case 39: // '\n escape = ''';\n break;\n case 60: // <\n escape = '<';\n break;\n case 62: // >\n escape = '>';\n break;\n default:\n continue;\n }\n\n if (lastIndex !== index) {\n html += str.substring(lastIndex, index);\n }\n\n lastIndex = index + 1;\n html += escape;\n }\n\n return lastIndex !== index\n ? html + str.substring(lastIndex, index)\n : html;\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath \n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport \"focus-visible\"\n\nimport {\n EMPTY,\n NEVER,\n Observable,\n Subject,\n defer,\n delay,\n filter,\n map,\n merge,\n mergeWith,\n shareReplay,\n switchMap\n} from \"rxjs\"\n\nimport { configuration, feature } from \"./_\"\nimport {\n at,\n getActiveElement,\n getOptionalElement,\n requestJSON,\n setLocation,\n setToggle,\n watchDocument,\n watchKeyboard,\n watchLocation,\n watchLocationTarget,\n watchMedia,\n watchPrint,\n watchScript,\n watchViewport\n} from \"./browser\"\nimport {\n getComponentElement,\n getComponentElements,\n mountAnnounce,\n mountBackToTop,\n mountConsent,\n mountContent,\n mountDialog,\n mountHeader,\n mountHeaderTitle,\n mountPalette,\n mountProgress,\n mountSearch,\n mountSearchHiglight,\n mountSidebar,\n mountSource,\n mountTableOfContents,\n mountTabs,\n watchHeader,\n watchMain\n} from \"./components\"\nimport {\n SearchIndex,\n setupClipboardJS,\n setupInstantNavigation,\n setupVersionSelector\n} from \"./integrations\"\nimport {\n patchEllipsis,\n patchIndeterminate,\n patchScrollfix,\n patchScrolllock\n} from \"./patches\"\nimport \"./polyfills\"\n\n/* ----------------------------------------------------------------------------\n * Functions - @todo refactor\n * ------------------------------------------------------------------------- */\n\n/**\n * Fetch search index\n *\n * @returns Search index observable\n */\nfunction fetchSearchIndex(): Observable {\n if (location.protocol === \"file:\") {\n return watchScript(\n `${new URL(\"search/search_index.js\", config.base)}`\n )\n .pipe(\n // @ts-ignore - @todo fix typings\n map(() => __index),\n shareReplay(1)\n )\n } else {\n return requestJSON(\n new URL(\"search/search_index.json\", config.base)\n )\n }\n}\n\n/* ----------------------------------------------------------------------------\n * Application\n * 
------------------------------------------------------------------------- */\n\n/* Yay, JavaScript is available */\ndocument.documentElement.classList.remove(\"no-js\")\ndocument.documentElement.classList.add(\"js\")\n\n/* Set up navigation observables and subjects */\nconst document$ = watchDocument()\nconst location$ = watchLocation()\nconst target$ = watchLocationTarget(location$)\nconst keyboard$ = watchKeyboard()\n\n/* Set up media observables */\nconst viewport$ = watchViewport()\nconst tablet$ = watchMedia(\"(min-width: 960px)\")\nconst screen$ = watchMedia(\"(min-width: 1220px)\")\nconst print$ = watchPrint()\n\n/* Retrieve search index, if search is enabled */\nconst config = configuration()\nconst index$ = document.forms.namedItem(\"search\")\n ? fetchSearchIndex()\n : NEVER\n\n/* Set up Clipboard.js integration */\nconst alert$ = new Subject()\nsetupClipboardJS({ alert$ })\n\n/* Set up progress indicator */\nconst progress$ = new Subject()\n\n/* Set up instant navigation, if enabled */\nif (feature(\"navigation.instant\"))\n setupInstantNavigation({ location$, viewport$, progress$ })\n .subscribe(document$)\n\n/* Set up version selector */\nif (config.version?.provider === \"mike\")\n setupVersionSelector({ document$ })\n\n/* Always close drawer and search on navigation */\nmerge(location$, target$)\n .pipe(\n delay(125)\n )\n .subscribe(() => {\n setToggle(\"drawer\", false)\n setToggle(\"search\", false)\n })\n\n/* Set up global keyboard handlers */\nkeyboard$\n .pipe(\n filter(({ mode }) => mode === \"global\")\n )\n .subscribe(key => {\n switch (key.type) {\n\n /* Go to previous page */\n case \"p\":\n case \",\":\n const prev = getOptionalElement(\"link[rel=prev]\")\n if (typeof prev !== \"undefined\")\n setLocation(prev)\n break\n\n /* Go to next page */\n case \"n\":\n case \".\":\n const next = getOptionalElement(\"link[rel=next]\")\n if (typeof next !== \"undefined\")\n setLocation(next)\n break\n\n /* Expand navigation, see https://bit.ly/3ZjG5io */\n case \"Enter\":\n const active = getActiveElement()\n if (active instanceof HTMLLabelElement)\n active.click()\n }\n })\n\n/* Set up patches */\npatchEllipsis({ viewport$, document$ })\npatchIndeterminate({ document$, tablet$ })\npatchScrollfix({ document$ })\npatchScrolllock({ viewport$, tablet$ })\n\n/* Set up header and main area observable */\nconst header$ = watchHeader(getComponentElement(\"header\"), { viewport$ })\nconst main$ = document$\n .pipe(\n map(() => getComponentElement(\"main\")),\n switchMap(el => watchMain(el, { viewport$, header$ })),\n shareReplay(1)\n )\n\n/* Set up control component observables */\nconst control$ = merge(\n\n /* Consent */\n ...getComponentElements(\"consent\")\n .map(el => mountConsent(el, { target$ })),\n\n /* Dialog */\n ...getComponentElements(\"dialog\")\n .map(el => mountDialog(el, { alert$ })),\n\n /* Header */\n ...getComponentElements(\"header\")\n .map(el => mountHeader(el, { viewport$, header$, main$ })),\n\n /* Color palette */\n ...getComponentElements(\"palette\")\n .map(el => mountPalette(el)),\n\n /* Progress bar */\n ...getComponentElements(\"progress\")\n .map(el => mountProgress(el, { progress$ })),\n\n /* Search */\n ...getComponentElements(\"search\")\n .map(el => mountSearch(el, { index$, keyboard$ })),\n\n /* Repository information */\n ...getComponentElements(\"source\")\n .map(el => mountSource(el))\n)\n\n/* Set up content component observables */\nconst content$ = defer(() => merge(\n\n /* Announcement bar */\n ...getComponentElements(\"announce\")\n 
.map(el => mountAnnounce(el)),\n\n /* Content */\n ...getComponentElements(\"content\")\n .map(el => mountContent(el, { viewport$, target$, print$ })),\n\n /* Search highlighting */\n ...getComponentElements(\"content\")\n .map(el => feature(\"search.highlight\")\n ? mountSearchHiglight(el, { index$, location$ })\n : EMPTY\n ),\n\n /* Header title */\n ...getComponentElements(\"header-title\")\n .map(el => mountHeaderTitle(el, { viewport$, header$ })),\n\n /* Sidebar */\n ...getComponentElements(\"sidebar\")\n .map(el => el.getAttribute(\"data-md-type\") === \"navigation\"\n ? at(screen$, () => mountSidebar(el, { viewport$, header$, main$ }))\n : at(tablet$, () => mountSidebar(el, { viewport$, header$, main$ }))\n ),\n\n /* Navigation tabs */\n ...getComponentElements(\"tabs\")\n .map(el => mountTabs(el, { viewport$, header$ })),\n\n /* Table of contents */\n ...getComponentElements(\"toc\")\n .map(el => mountTableOfContents(el, {\n viewport$, header$, main$, target$\n })),\n\n /* Back-to-top button */\n ...getComponentElements(\"top\")\n .map(el => mountBackToTop(el, { viewport$, header$, main$, target$ }))\n))\n\n/* Set up component observables */\nconst component$ = document$\n .pipe(\n switchMap(() => content$),\n mergeWith(control$),\n shareReplay(1)\n )\n\n/* Subscribe to all components */\ncomponent$.subscribe()\n\n/* ----------------------------------------------------------------------------\n * Exports\n * ------------------------------------------------------------------------- */\n\nwindow.document$ = document$ /* Document observable */\nwindow.location$ = location$ /* Location subject */\nwindow.target$ = target$ /* Location target observable */\nwindow.keyboard$ = keyboard$ /* Keyboard observable */\nwindow.viewport$ = viewport$ /* Viewport observable */\nwindow.tablet$ = tablet$ /* Media tablet observable */\nwindow.screen$ = screen$ /* Media screen observable */\nwindow.print$ = print$ /* Media print observable */\nwindow.alert$ = alert$ /* Alert subject */\nwindow.progress$ = progress$ /* Progress indicator subject */\nwindow.component$ = component$ /* Component observable */\n", "/*! *****************************************************************************\r\nCopyright (c) Microsoft Corporation.\r\n\r\nPermission to use, copy, modify, and/or distribute this software for any\r\npurpose with or without fee is hereby granted.\r\n\r\nTHE SOFTWARE IS PROVIDED \"AS IS\" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH\r\nREGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY\r\nAND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,\r\nINDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM\r\nLOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR\r\nOTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR\r\nPERFORMANCE OF THIS SOFTWARE.\r\n***************************************************************************** */\r\n/* global Reflect, Promise */\r\n\r\nvar extendStatics = function(d, b) {\r\n extendStatics = Object.setPrototypeOf ||\r\n ({ __proto__: [] } instanceof Array && function (d, b) { d.__proto__ = b; }) ||\r\n function (d, b) { for (var p in b) if (Object.prototype.hasOwnProperty.call(b, p)) d[p] = b[p]; };\r\n return extendStatics(d, b);\r\n};\r\n\r\nexport function __extends(d, b) {\r\n if (typeof b !== \"function\" && b !== null)\r\n throw new TypeError(\"Class extends value \" + String(b) + \" is not a constructor or null\");\r\n extendStatics(d, b);\r\n function __() { this.constructor = d; }\r\n d.prototype = b === null ? Object.create(b) : (__.prototype = b.prototype, new __());\r\n}\r\n\r\nexport var __assign = function() {\r\n __assign = Object.assign || function __assign(t) {\r\n for (var s, i = 1, n = arguments.length; i < n; i++) {\r\n s = arguments[i];\r\n for (var p in s) if (Object.prototype.hasOwnProperty.call(s, p)) t[p] = s[p];\r\n }\r\n return t;\r\n }\r\n return __assign.apply(this, arguments);\r\n}\r\n\r\nexport function __rest(s, e) {\r\n var t = {};\r\n for (var p in s) if (Object.prototype.hasOwnProperty.call(s, p) && e.indexOf(p) < 0)\r\n t[p] = s[p];\r\n if (s != null && typeof Object.getOwnPropertySymbols === \"function\")\r\n for (var i = 0, p = Object.getOwnPropertySymbols(s); i < p.length; i++) {\r\n if (e.indexOf(p[i]) < 0 && Object.prototype.propertyIsEnumerable.call(s, p[i]))\r\n t[p[i]] = s[p[i]];\r\n }\r\n return t;\r\n}\r\n\r\nexport function __decorate(decorators, target, key, desc) {\r\n var c = arguments.length, r = c < 3 ? target : desc === null ? desc = Object.getOwnPropertyDescriptor(target, key) : desc, d;\r\n if (typeof Reflect === \"object\" && typeof Reflect.decorate === \"function\") r = Reflect.decorate(decorators, target, key, desc);\r\n else for (var i = decorators.length - 1; i >= 0; i--) if (d = decorators[i]) r = (c < 3 ? d(r) : c > 3 ? d(target, key, r) : d(target, key)) || r;\r\n return c > 3 && r && Object.defineProperty(target, key, r), r;\r\n}\r\n\r\nexport function __param(paramIndex, decorator) {\r\n return function (target, key) { decorator(target, key, paramIndex); }\r\n}\r\n\r\nexport function __metadata(metadataKey, metadataValue) {\r\n if (typeof Reflect === \"object\" && typeof Reflect.metadata === \"function\") return Reflect.metadata(metadataKey, metadataValue);\r\n}\r\n\r\nexport function __awaiter(thisArg, _arguments, P, generator) {\r\n function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }\r\n return new (P || (P = Promise))(function (resolve, reject) {\r\n function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }\r\n function rejected(value) { try { step(generator[\"throw\"](value)); } catch (e) { reject(e); } }\r\n function step(result) { result.done ? 
resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }\r\n step((generator = generator.apply(thisArg, _arguments || [])).next());\r\n });\r\n}\r\n\r\nexport function __generator(thisArg, body) {\r\n var _ = { label: 0, sent: function() { if (t[0] & 1) throw t[1]; return t[1]; }, trys: [], ops: [] }, f, y, t, g;\r\n return g = { next: verb(0), \"throw\": verb(1), \"return\": verb(2) }, typeof Symbol === \"function\" && (g[Symbol.iterator] = function() { return this; }), g;\r\n function verb(n) { return function (v) { return step([n, v]); }; }\r\n function step(op) {\r\n if (f) throw new TypeError(\"Generator is already executing.\");\r\n while (_) try {\r\n if (f = 1, y && (t = op[0] & 2 ? y[\"return\"] : op[0] ? y[\"throw\"] || ((t = y[\"return\"]) && t.call(y), 0) : y.next) && !(t = t.call(y, op[1])).done) return t;\r\n if (y = 0, t) op = [op[0] & 2, t.value];\r\n switch (op[0]) {\r\n case 0: case 1: t = op; break;\r\n case 4: _.label++; return { value: op[1], done: false };\r\n case 5: _.label++; y = op[1]; op = [0]; continue;\r\n case 7: op = _.ops.pop(); _.trys.pop(); continue;\r\n default:\r\n if (!(t = _.trys, t = t.length > 0 && t[t.length - 1]) && (op[0] === 6 || op[0] === 2)) { _ = 0; continue; }\r\n if (op[0] === 3 && (!t || (op[1] > t[0] && op[1] < t[3]))) { _.label = op[1]; break; }\r\n if (op[0] === 6 && _.label < t[1]) { _.label = t[1]; t = op; break; }\r\n if (t && _.label < t[2]) { _.label = t[2]; _.ops.push(op); break; }\r\n if (t[2]) _.ops.pop();\r\n _.trys.pop(); continue;\r\n }\r\n op = body.call(thisArg, _);\r\n } catch (e) { op = [6, e]; y = 0; } finally { f = t = 0; }\r\n if (op[0] & 5) throw op[1]; return { value: op[0] ? op[1] : void 0, done: true };\r\n }\r\n}\r\n\r\nexport var __createBinding = Object.create ? (function(o, m, k, k2) {\r\n if (k2 === undefined) k2 = k;\r\n Object.defineProperty(o, k2, { enumerable: true, get: function() { return m[k]; } });\r\n}) : (function(o, m, k, k2) {\r\n if (k2 === undefined) k2 = k;\r\n o[k2] = m[k];\r\n});\r\n\r\nexport function __exportStar(m, o) {\r\n for (var p in m) if (p !== \"default\" && !Object.prototype.hasOwnProperty.call(o, p)) __createBinding(o, m, p);\r\n}\r\n\r\nexport function __values(o) {\r\n var s = typeof Symbol === \"function\" && Symbol.iterator, m = s && o[s], i = 0;\r\n if (m) return m.call(o);\r\n if (o && typeof o.length === \"number\") return {\r\n next: function () {\r\n if (o && i >= o.length) o = void 0;\r\n return { value: o && o[i++], done: !o };\r\n }\r\n };\r\n throw new TypeError(s ? 
\"Object is not iterable.\" : \"Symbol.iterator is not defined.\");\r\n}\r\n\r\nexport function __read(o, n) {\r\n var m = typeof Symbol === \"function\" && o[Symbol.iterator];\r\n if (!m) return o;\r\n var i = m.call(o), r, ar = [], e;\r\n try {\r\n while ((n === void 0 || n-- > 0) && !(r = i.next()).done) ar.push(r.value);\r\n }\r\n catch (error) { e = { error: error }; }\r\n finally {\r\n try {\r\n if (r && !r.done && (m = i[\"return\"])) m.call(i);\r\n }\r\n finally { if (e) throw e.error; }\r\n }\r\n return ar;\r\n}\r\n\r\n/** @deprecated */\r\nexport function __spread() {\r\n for (var ar = [], i = 0; i < arguments.length; i++)\r\n ar = ar.concat(__read(arguments[i]));\r\n return ar;\r\n}\r\n\r\n/** @deprecated */\r\nexport function __spreadArrays() {\r\n for (var s = 0, i = 0, il = arguments.length; i < il; i++) s += arguments[i].length;\r\n for (var r = Array(s), k = 0, i = 0; i < il; i++)\r\n for (var a = arguments[i], j = 0, jl = a.length; j < jl; j++, k++)\r\n r[k] = a[j];\r\n return r;\r\n}\r\n\r\nexport function __spreadArray(to, from, pack) {\r\n if (pack || arguments.length === 2) for (var i = 0, l = from.length, ar; i < l; i++) {\r\n if (ar || !(i in from)) {\r\n if (!ar) ar = Array.prototype.slice.call(from, 0, i);\r\n ar[i] = from[i];\r\n }\r\n }\r\n return to.concat(ar || Array.prototype.slice.call(from));\r\n}\r\n\r\nexport function __await(v) {\r\n return this instanceof __await ? (this.v = v, this) : new __await(v);\r\n}\r\n\r\nexport function __asyncGenerator(thisArg, _arguments, generator) {\r\n if (!Symbol.asyncIterator) throw new TypeError(\"Symbol.asyncIterator is not defined.\");\r\n var g = generator.apply(thisArg, _arguments || []), i, q = [];\r\n return i = {}, verb(\"next\"), verb(\"throw\"), verb(\"return\"), i[Symbol.asyncIterator] = function () { return this; }, i;\r\n function verb(n) { if (g[n]) i[n] = function (v) { return new Promise(function (a, b) { q.push([n, v, a, b]) > 1 || resume(n, v); }); }; }\r\n function resume(n, v) { try { step(g[n](v)); } catch (e) { settle(q[0][3], e); } }\r\n function step(r) { r.value instanceof __await ? Promise.resolve(r.value.v).then(fulfill, reject) : settle(q[0][2], r); }\r\n function fulfill(value) { resume(\"next\", value); }\r\n function reject(value) { resume(\"throw\", value); }\r\n function settle(f, v) { if (f(v), q.shift(), q.length) resume(q[0][0], q[0][1]); }\r\n}\r\n\r\nexport function __asyncDelegator(o) {\r\n var i, p;\r\n return i = {}, verb(\"next\"), verb(\"throw\", function (e) { throw e; }), verb(\"return\"), i[Symbol.iterator] = function () { return this; }, i;\r\n function verb(n, f) { i[n] = o[n] ? function (v) { return (p = !p) ? { value: __await(o[n](v)), done: n === \"return\" } : f ? f(v) : v; } : f; }\r\n}\r\n\r\nexport function __asyncValues(o) {\r\n if (!Symbol.asyncIterator) throw new TypeError(\"Symbol.asyncIterator is not defined.\");\r\n var m = o[Symbol.asyncIterator], i;\r\n return m ? m.call(o) : (o = typeof __values === \"function\" ? 
__values(o) : o[Symbol.iterator](), i = {}, verb(\"next\"), verb(\"throw\"), verb(\"return\"), i[Symbol.asyncIterator] = function () { return this; }, i);\r\n function verb(n) { i[n] = o[n] && function (v) { return new Promise(function (resolve, reject) { v = o[n](v), settle(resolve, reject, v.done, v.value); }); }; }\r\n function settle(resolve, reject, d, v) { Promise.resolve(v).then(function(v) { resolve({ value: v, done: d }); }, reject); }\r\n}\r\n\r\nexport function __makeTemplateObject(cooked, raw) {\r\n if (Object.defineProperty) { Object.defineProperty(cooked, \"raw\", { value: raw }); } else { cooked.raw = raw; }\r\n return cooked;\r\n};\r\n\r\nvar __setModuleDefault = Object.create ? (function(o, v) {\r\n Object.defineProperty(o, \"default\", { enumerable: true, value: v });\r\n}) : function(o, v) {\r\n o[\"default\"] = v;\r\n};\r\n\r\nexport function __importStar(mod) {\r\n if (mod && mod.__esModule) return mod;\r\n var result = {};\r\n if (mod != null) for (var k in mod) if (k !== \"default\" && Object.prototype.hasOwnProperty.call(mod, k)) __createBinding(result, mod, k);\r\n __setModuleDefault(result, mod);\r\n return result;\r\n}\r\n\r\nexport function __importDefault(mod) {\r\n return (mod && mod.__esModule) ? mod : { default: mod };\r\n}\r\n\r\nexport function __classPrivateFieldGet(receiver, state, kind, f) {\r\n if (kind === \"a\" && !f) throw new TypeError(\"Private accessor was defined without a getter\");\r\n if (typeof state === \"function\" ? receiver !== state || !f : !state.has(receiver)) throw new TypeError(\"Cannot read private member from an object whose class did not declare it\");\r\n return kind === \"m\" ? f : kind === \"a\" ? f.call(receiver) : f ? f.value : state.get(receiver);\r\n}\r\n\r\nexport function __classPrivateFieldSet(receiver, state, value, kind, f) {\r\n if (kind === \"m\") throw new TypeError(\"Private method is not writable\");\r\n if (kind === \"a\" && !f) throw new TypeError(\"Private accessor was defined without a setter\");\r\n if (typeof state === \"function\" ? receiver !== state || !f : !state.has(receiver)) throw new TypeError(\"Cannot write private member to an object whose class did not declare it\");\r\n return (kind === \"a\" ? f.call(receiver, value) : f ? f.value = value : state.set(receiver, value)), value;\r\n}\r\n", "/**\n * Returns true if the object is a function.\n * @param value The value to check\n */\nexport function isFunction(value: any): value is (...args: any[]) => any {\n return typeof value === 'function';\n}\n", "/**\n * Used to create Error subclasses until the community moves away from ES5.\n *\n * This is because compiling from TypeScript down to ES5 has issues with subclassing Errors\n * as well as other built-in types: /~https://github.com/Microsoft/TypeScript/issues/12123\n *\n * @param createImpl A factory function to create the actual constructor implementation. 
The returned\n * function should be a named function that calls `_super` internally.\n */\nexport function createErrorClass(createImpl: (_super: any) => any): T {\n const _super = (instance: any) => {\n Error.call(instance);\n instance.stack = new Error().stack;\n };\n\n const ctorFunc = createImpl(_super);\n ctorFunc.prototype = Object.create(Error.prototype);\n ctorFunc.prototype.constructor = ctorFunc;\n return ctorFunc;\n}\n", "import { createErrorClass } from './createErrorClass';\n\nexport interface UnsubscriptionError extends Error {\n readonly errors: any[];\n}\n\nexport interface UnsubscriptionErrorCtor {\n /**\n * @deprecated Internal implementation detail. Do not construct error instances.\n * Cannot be tagged as internal: /~https://github.com/ReactiveX/rxjs/issues/6269\n */\n new (errors: any[]): UnsubscriptionError;\n}\n\n/**\n * An error thrown when one or more errors have occurred during the\n * `unsubscribe` of a {@link Subscription}.\n */\nexport const UnsubscriptionError: UnsubscriptionErrorCtor = createErrorClass(\n (_super) =>\n function UnsubscriptionErrorImpl(this: any, errors: (Error | string)[]) {\n _super(this);\n this.message = errors\n ? `${errors.length} errors occurred during unsubscription:\n${errors.map((err, i) => `${i + 1}) ${err.toString()}`).join('\\n ')}`\n : '';\n this.name = 'UnsubscriptionError';\n this.errors = errors;\n }\n);\n", "/**\n * Removes an item from an array, mutating it.\n * @param arr The array to remove the item from\n * @param item The item to remove\n */\nexport function arrRemove(arr: T[] | undefined | null, item: T) {\n if (arr) {\n const index = arr.indexOf(item);\n 0 <= index && arr.splice(index, 1);\n }\n}\n", "import { isFunction } from './util/isFunction';\nimport { UnsubscriptionError } from './util/UnsubscriptionError';\nimport { SubscriptionLike, TeardownLogic, Unsubscribable } from './types';\nimport { arrRemove } from './util/arrRemove';\n\n/**\n * Represents a disposable resource, such as the execution of an Observable. A\n * Subscription has one important method, `unsubscribe`, that takes no argument\n * and just disposes the resource held by the subscription.\n *\n * Additionally, subscriptions may be grouped together through the `add()`\n * method, which will attach a child Subscription to the current Subscription.\n * When a Subscription is unsubscribed, all its children (and its grandchildren)\n * will be unsubscribed as well.\n *\n * @class Subscription\n */\nexport class Subscription implements SubscriptionLike {\n /** @nocollapse */\n public static EMPTY = (() => {\n const empty = new Subscription();\n empty.closed = true;\n return empty;\n })();\n\n /**\n * A flag to indicate whether this Subscription has already been unsubscribed.\n */\n public closed = false;\n\n private _parentage: Subscription[] | Subscription | null = null;\n\n /**\n * The list of registered finalizers to execute upon unsubscription. Adding and removing from this\n * list occurs in the {@link #add} and {@link #remove} methods.\n */\n private _finalizers: Exclude[] | null = null;\n\n /**\n * @param initialTeardown A function executed first as part of the finalization\n * process that is kicked off when {@link #unsubscribe} is called.\n */\n constructor(private initialTeardown?: () => void) {}\n\n /**\n * Disposes the resources held by the subscription. 
May, for instance, cancel\n * an ongoing Observable execution or cancel any other type of work that\n * started when the Subscription was created.\n * @return {void}\n */\n unsubscribe(): void {\n let errors: any[] | undefined;\n\n if (!this.closed) {\n this.closed = true;\n\n // Remove this from it's parents.\n const { _parentage } = this;\n if (_parentage) {\n this._parentage = null;\n if (Array.isArray(_parentage)) {\n for (const parent of _parentage) {\n parent.remove(this);\n }\n } else {\n _parentage.remove(this);\n }\n }\n\n const { initialTeardown: initialFinalizer } = this;\n if (isFunction(initialFinalizer)) {\n try {\n initialFinalizer();\n } catch (e) {\n errors = e instanceof UnsubscriptionError ? e.errors : [e];\n }\n }\n\n const { _finalizers } = this;\n if (_finalizers) {\n this._finalizers = null;\n for (const finalizer of _finalizers) {\n try {\n execFinalizer(finalizer);\n } catch (err) {\n errors = errors ?? [];\n if (err instanceof UnsubscriptionError) {\n errors = [...errors, ...err.errors];\n } else {\n errors.push(err);\n }\n }\n }\n }\n\n if (errors) {\n throw new UnsubscriptionError(errors);\n }\n }\n }\n\n /**\n * Adds a finalizer to this subscription, so that finalization will be unsubscribed/called\n * when this subscription is unsubscribed. If this subscription is already {@link #closed},\n * because it has already been unsubscribed, then whatever finalizer is passed to it\n * will automatically be executed (unless the finalizer itself is also a closed subscription).\n *\n * Closed Subscriptions cannot be added as finalizers to any subscription. Adding a closed\n * subscription to a any subscription will result in no operation. (A noop).\n *\n * Adding a subscription to itself, or adding `null` or `undefined` will not perform any\n * operation at all. (A noop).\n *\n * `Subscription` instances that are added to this instance will automatically remove themselves\n * if they are unsubscribed. Functions and {@link Unsubscribable} objects that you wish to remove\n * will need to be removed manually with {@link #remove}\n *\n * @param teardown The finalization logic to add to this subscription.\n */\n add(teardown: TeardownLogic): void {\n // Only add the finalizer if it's not undefined\n // and don't add a subscription to itself.\n if (teardown && teardown !== this) {\n if (this.closed) {\n // If this subscription is already closed,\n // execute whatever finalizer is handed to it automatically.\n execFinalizer(teardown);\n } else {\n if (teardown instanceof Subscription) {\n // We don't add closed subscriptions, and we don't add the same subscription\n // twice. Subscription unsubscribe is idempotent.\n if (teardown.closed || teardown._hasParent(this)) {\n return;\n }\n teardown._addParent(this);\n }\n (this._finalizers = this._finalizers ?? 
[]).push(teardown);\n }\n }\n }\n\n /**\n * Checks to see if a this subscription already has a particular parent.\n * This will signal that this subscription has already been added to the parent in question.\n * @param parent the parent to check for\n */\n private _hasParent(parent: Subscription) {\n const { _parentage } = this;\n return _parentage === parent || (Array.isArray(_parentage) && _parentage.includes(parent));\n }\n\n /**\n * Adds a parent to this subscription so it can be removed from the parent if it\n * unsubscribes on it's own.\n *\n * NOTE: THIS ASSUMES THAT {@link _hasParent} HAS ALREADY BEEN CHECKED.\n * @param parent The parent subscription to add\n */\n private _addParent(parent: Subscription) {\n const { _parentage } = this;\n this._parentage = Array.isArray(_parentage) ? (_parentage.push(parent), _parentage) : _parentage ? [_parentage, parent] : parent;\n }\n\n /**\n * Called on a child when it is removed via {@link #remove}.\n * @param parent The parent to remove\n */\n private _removeParent(parent: Subscription) {\n const { _parentage } = this;\n if (_parentage === parent) {\n this._parentage = null;\n } else if (Array.isArray(_parentage)) {\n arrRemove(_parentage, parent);\n }\n }\n\n /**\n * Removes a finalizer from this subscription that was previously added with the {@link #add} method.\n *\n * Note that `Subscription` instances, when unsubscribed, will automatically remove themselves\n * from every other `Subscription` they have been added to. This means that using the `remove` method\n * is not a common thing and should be used thoughtfully.\n *\n * If you add the same finalizer instance of a function or an unsubscribable object to a `Subscription` instance\n * more than once, you will need to call `remove` the same number of times to remove all instances.\n *\n * All finalizer instances are removed to free up memory upon unsubscription.\n *\n * @param teardown The finalizer to remove from this subscription\n */\n remove(teardown: Exclude): void {\n const { _finalizers } = this;\n _finalizers && arrRemove(_finalizers, teardown);\n\n if (teardown instanceof Subscription) {\n teardown._removeParent(this);\n }\n }\n}\n\nexport const EMPTY_SUBSCRIPTION = Subscription.EMPTY;\n\nexport function isSubscription(value: any): value is Subscription {\n return (\n value instanceof Subscription ||\n (value && 'closed' in value && isFunction(value.remove) && isFunction(value.add) && isFunction(value.unsubscribe))\n );\n}\n\nfunction execFinalizer(finalizer: Unsubscribable | (() => void)) {\n if (isFunction(finalizer)) {\n finalizer();\n } else {\n finalizer.unsubscribe();\n }\n}\n", "import { Subscriber } from './Subscriber';\nimport { ObservableNotification } from './types';\n\n/**\n * The {@link GlobalConfig} object for RxJS. It is used to configure things\n * like how to react on unhandled errors.\n */\nexport const config: GlobalConfig = {\n onUnhandledError: null,\n onStoppedNotification: null,\n Promise: undefined,\n useDeprecatedSynchronousErrorHandling: false,\n useDeprecatedNextContext: false,\n};\n\n/**\n * The global configuration object for RxJS, used to configure things\n * like how to react on unhandled errors. Accessible via {@link config}\n * object.\n */\nexport interface GlobalConfig {\n /**\n * A registration point for unhandled errors from RxJS. These are errors that\n * cannot were not handled by consuming code in the usual subscription path. 
For\n * example, if you have this configured, and you subscribe to an observable without\n * providing an error handler, errors from that subscription will end up here. This\n * will _always_ be called asynchronously on another job in the runtime. This is because\n * we do not want errors thrown in this user-configured handler to interfere with the\n * behavior of the library.\n */\n onUnhandledError: ((err: any) => void) | null;\n\n /**\n * A registration point for notifications that cannot be sent to subscribers because they\n * have completed, errored or have been explicitly unsubscribed. By default, next, complete\n * and error notifications sent to stopped subscribers are noops. However, sometimes callers\n * might want a different behavior. For example, with sources that attempt to report errors\n * to stopped subscribers, a caller can configure RxJS to throw an unhandled error instead.\n * This will _always_ be called asynchronously on another job in the runtime. This is because\n * we do not want errors thrown in this user-configured handler to interfere with the\n * behavior of the library.\n */\n onStoppedNotification: ((notification: ObservableNotification, subscriber: Subscriber) => void) | null;\n\n /**\n * The promise constructor used by default for {@link Observable#toPromise toPromise} and {@link Observable#forEach forEach}\n * methods.\n *\n * @deprecated As of version 8, RxJS will no longer support this sort of injection of a\n * Promise constructor. If you need a Promise implementation other than native promises,\n * please polyfill/patch Promise as you see appropriate. Will be removed in v8.\n */\n Promise?: PromiseConstructorLike;\n\n /**\n * If true, turns on synchronous error rethrowing, which is a deprecated behavior\n * in v6 and higher. This behavior enables bad patterns like wrapping a subscribe\n * call in a try/catch block. It also enables producer interference, a nasty bug\n * where a multicast can be broken for all observers by a downstream consumer with\n * an unhandled error. DO NOT USE THIS FLAG UNLESS IT'S NEEDED TO BUY TIME\n * FOR MIGRATION REASONS.\n *\n * @deprecated As of version 8, RxJS will no longer support synchronous throwing\n * of unhandled errors. All errors will be thrown on a separate call stack to prevent bad\n * behaviors described above. Will be removed in v8.\n */\n useDeprecatedSynchronousErrorHandling: boolean;\n\n /**\n * If true, enables an as-of-yet undocumented feature from v5: The ability to access\n * `unsubscribe()` via `this` context in `next` functions created in observers passed\n * to `subscribe`.\n *\n * This is being removed because the performance was severely problematic, and it could also cause\n * issues when types other than POJOs are passed to subscribe as subscribers, as they will likely have\n * their `this` context overwritten.\n *\n * @deprecated As of version 8, RxJS will no longer support altering the\n * context of next functions provided as part of an observer to Subscribe. Instead,\n * you will have access to a subscription or a signal or token that will allow you to do things like\n * unsubscribe and test closed status. 
Will be removed in v8.\n */\n useDeprecatedNextContext: boolean;\n}\n", "import type { TimerHandle } from './timerHandle';\ntype SetTimeoutFunction = (handler: () => void, timeout?: number, ...args: any[]) => TimerHandle;\ntype ClearTimeoutFunction = (handle: TimerHandle) => void;\n\ninterface TimeoutProvider {\n setTimeout: SetTimeoutFunction;\n clearTimeout: ClearTimeoutFunction;\n delegate:\n | {\n setTimeout: SetTimeoutFunction;\n clearTimeout: ClearTimeoutFunction;\n }\n | undefined;\n}\n\nexport const timeoutProvider: TimeoutProvider = {\n // When accessing the delegate, use the variable rather than `this` so that\n // the functions can be called without being bound to the provider.\n setTimeout(handler: () => void, timeout?: number, ...args) {\n const { delegate } = timeoutProvider;\n if (delegate?.setTimeout) {\n return delegate.setTimeout(handler, timeout, ...args);\n }\n return setTimeout(handler, timeout, ...args);\n },\n clearTimeout(handle) {\n const { delegate } = timeoutProvider;\n return (delegate?.clearTimeout || clearTimeout)(handle as any);\n },\n delegate: undefined,\n};\n", "import { config } from '../config';\nimport { timeoutProvider } from '../scheduler/timeoutProvider';\n\n/**\n * Handles an error on another job either with the user-configured {@link onUnhandledError},\n * or by throwing it on that new job so it can be picked up by `window.onerror`, `process.on('error')`, etc.\n *\n * This should be called whenever there is an error that is out-of-band with the subscription\n * or when an error hits a terminal boundary of the subscription and no error handler was provided.\n *\n * @param err the error to report\n */\nexport function reportUnhandledError(err: any) {\n timeoutProvider.setTimeout(() => {\n const { onUnhandledError } = config;\n if (onUnhandledError) {\n // Execute the user-configured error handler.\n onUnhandledError(err);\n } else {\n // Throw so it is picked up by the runtime's uncaught error mechanism.\n throw err;\n }\n });\n}\n", "/* tslint:disable:no-empty */\nexport function noop() { }\n", "import { CompleteNotification, NextNotification, ErrorNotification } from './types';\n\n/**\n * A completion object optimized for memory use and created to be the\n * same \"shape\" as other notifications in v8.\n * @internal\n */\nexport const COMPLETE_NOTIFICATION = (() => createNotification('C', undefined, undefined) as CompleteNotification)();\n\n/**\n * Internal use only. Creates an optimized error notification that is the same \"shape\"\n * as other notifications.\n * @internal\n */\nexport function errorNotification(error: any): ErrorNotification {\n return createNotification('E', undefined, error) as any;\n}\n\n/**\n * Internal use only. Creates an optimized next notification that is the same \"shape\"\n * as other notifications.\n * @internal\n */\nexport function nextNotification(value: T) {\n return createNotification('N', value, undefined) as NextNotification;\n}\n\n/**\n * Ensures that all notifications created internally have the same \"shape\" in v8.\n *\n * TODO: This is only exported to support a crazy legacy test in `groupBy`.\n * @internal\n */\nexport function createNotification(kind: 'N' | 'E' | 'C', value: any, error: any) {\n return {\n kind,\n value,\n error,\n };\n}\n", "import { config } from '../config';\n\nlet context: { errorThrown: boolean; error: any } | null = null;\n\n/**\n * Handles dealing with errors for super-gross mode. 
Creates a context, in which\n * any synchronously thrown errors will be passed to {@link captureError}. Which\n * will record the error such that it will be rethrown after the call back is complete.\n * TODO: Remove in v8\n * @param cb An immediately executed function.\n */\nexport function errorContext(cb: () => void) {\n if (config.useDeprecatedSynchronousErrorHandling) {\n const isRoot = !context;\n if (isRoot) {\n context = { errorThrown: false, error: null };\n }\n cb();\n if (isRoot) {\n const { errorThrown, error } = context!;\n context = null;\n if (errorThrown) {\n throw error;\n }\n }\n } else {\n // This is the general non-deprecated path for everyone that\n // isn't crazy enough to use super-gross mode (useDeprecatedSynchronousErrorHandling)\n cb();\n }\n}\n\n/**\n * Captures errors only in super-gross mode.\n * @param err the error to capture\n */\nexport function captureError(err: any) {\n if (config.useDeprecatedSynchronousErrorHandling && context) {\n context.errorThrown = true;\n context.error = err;\n }\n}\n", "import { isFunction } from './util/isFunction';\nimport { Observer, ObservableNotification } from './types';\nimport { isSubscription, Subscription } from './Subscription';\nimport { config } from './config';\nimport { reportUnhandledError } from './util/reportUnhandledError';\nimport { noop } from './util/noop';\nimport { nextNotification, errorNotification, COMPLETE_NOTIFICATION } from './NotificationFactories';\nimport { timeoutProvider } from './scheduler/timeoutProvider';\nimport { captureError } from './util/errorContext';\n\n/**\n * Implements the {@link Observer} interface and extends the\n * {@link Subscription} class. While the {@link Observer} is the public API for\n * consuming the values of an {@link Observable}, all Observers get converted to\n * a Subscriber, in order to provide Subscription-like capabilities such as\n * `unsubscribe`. Subscriber is a common type in RxJS, and crucial for\n * implementing operators, but it is rarely used as a public API.\n *\n * @class Subscriber\n */\nexport class Subscriber extends Subscription implements Observer {\n /**\n * A static factory for a Subscriber, given a (potentially partial) definition\n * of an Observer.\n * @param next The `next` callback of an Observer.\n * @param error The `error` callback of an\n * Observer.\n * @param complete The `complete` callback of an\n * Observer.\n * @return A Subscriber wrapping the (partially defined)\n * Observer represented by the given arguments.\n * @nocollapse\n * @deprecated Do not use. Will be removed in v8. There is no replacement for this\n * method, and there is no reason to be creating instances of `Subscriber` directly.\n * If you have a specific use case, please file an issue.\n */\n static create(next?: (x?: T) => void, error?: (e?: any) => void, complete?: () => void): Subscriber {\n return new SafeSubscriber(next, error, complete);\n }\n\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n protected isStopped: boolean = false;\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n protected destination: Subscriber | Observer; // this `any` is the escape hatch to erase extra type param (e.g. R)\n\n /**\n * @deprecated Internal implementation detail, do not use directly. Will be made internal in v8.\n * There is no reason to directly create an instance of Subscriber. 
This type is exported for typings reasons.\n */\n constructor(destination?: Subscriber | Observer) {\n super();\n if (destination) {\n this.destination = destination;\n // Automatically chain subscriptions together here.\n // if destination is a Subscription, then it is a Subscriber.\n if (isSubscription(destination)) {\n destination.add(this);\n }\n } else {\n this.destination = EMPTY_OBSERVER;\n }\n }\n\n /**\n * The {@link Observer} callback to receive notifications of type `next` from\n * the Observable, with a value. The Observable may call this method 0 or more\n * times.\n * @param {T} [value] The `next` value.\n * @return {void}\n */\n next(value?: T): void {\n if (this.isStopped) {\n handleStoppedNotification(nextNotification(value), this);\n } else {\n this._next(value!);\n }\n }\n\n /**\n * The {@link Observer} callback to receive notifications of type `error` from\n * the Observable, with an attached `Error`. Notifies the Observer that\n * the Observable has experienced an error condition.\n * @param {any} [err] The `error` exception.\n * @return {void}\n */\n error(err?: any): void {\n if (this.isStopped) {\n handleStoppedNotification(errorNotification(err), this);\n } else {\n this.isStopped = true;\n this._error(err);\n }\n }\n\n /**\n * The {@link Observer} callback to receive a valueless notification of type\n * `complete` from the Observable. Notifies the Observer that the Observable\n * has finished sending push-based notifications.\n * @return {void}\n */\n complete(): void {\n if (this.isStopped) {\n handleStoppedNotification(COMPLETE_NOTIFICATION, this);\n } else {\n this.isStopped = true;\n this._complete();\n }\n }\n\n unsubscribe(): void {\n if (!this.closed) {\n this.isStopped = true;\n super.unsubscribe();\n this.destination = null!;\n }\n }\n\n protected _next(value: T): void {\n this.destination.next(value);\n }\n\n protected _error(err: any): void {\n try {\n this.destination.error(err);\n } finally {\n this.unsubscribe();\n }\n }\n\n protected _complete(): void {\n try {\n this.destination.complete();\n } finally {\n this.unsubscribe();\n }\n }\n}\n\n/**\n * This bind is captured here because we want to be able to have\n * compatibility with monoid libraries that tend to use a method named\n * `bind`. 
In particular, a library called Monio requires this.\n */\nconst _bind = Function.prototype.bind;\n\nfunction bind any>(fn: Fn, thisArg: any): Fn {\n return _bind.call(fn, thisArg);\n}\n\n/**\n * Internal optimization only, DO NOT EXPOSE.\n * @internal\n */\nclass ConsumerObserver implements Observer {\n constructor(private partialObserver: Partial>) {}\n\n next(value: T): void {\n const { partialObserver } = this;\n if (partialObserver.next) {\n try {\n partialObserver.next(value);\n } catch (error) {\n handleUnhandledError(error);\n }\n }\n }\n\n error(err: any): void {\n const { partialObserver } = this;\n if (partialObserver.error) {\n try {\n partialObserver.error(err);\n } catch (error) {\n handleUnhandledError(error);\n }\n } else {\n handleUnhandledError(err);\n }\n }\n\n complete(): void {\n const { partialObserver } = this;\n if (partialObserver.complete) {\n try {\n partialObserver.complete();\n } catch (error) {\n handleUnhandledError(error);\n }\n }\n }\n}\n\nexport class SafeSubscriber extends Subscriber {\n constructor(\n observerOrNext?: Partial> | ((value: T) => void) | null,\n error?: ((e?: any) => void) | null,\n complete?: (() => void) | null\n ) {\n super();\n\n let partialObserver: Partial>;\n if (isFunction(observerOrNext) || !observerOrNext) {\n // The first argument is a function, not an observer. The next\n // two arguments *could* be observers, or they could be empty.\n partialObserver = {\n next: (observerOrNext ?? undefined) as (((value: T) => void) | undefined),\n error: error ?? undefined,\n complete: complete ?? undefined,\n };\n } else {\n // The first argument is a partial observer.\n let context: any;\n if (this && config.useDeprecatedNextContext) {\n // This is a deprecated path that made `this.unsubscribe()` available in\n // next handler functions passed to subscribe. This only exists behind a flag\n // now, as it is *very* slow.\n context = Object.create(observerOrNext);\n context.unsubscribe = () => this.unsubscribe();\n partialObserver = {\n next: observerOrNext.next && bind(observerOrNext.next, context),\n error: observerOrNext.error && bind(observerOrNext.error, context),\n complete: observerOrNext.complete && bind(observerOrNext.complete, context),\n };\n } else {\n // The \"normal\" path. 
Just use the partial observer directly.\n partialObserver = observerOrNext;\n }\n }\n\n // Wrap the partial observer to ensure it's a full observer, and\n // make sure proper error handling is accounted for.\n this.destination = new ConsumerObserver(partialObserver);\n }\n}\n\nfunction handleUnhandledError(error: any) {\n if (config.useDeprecatedSynchronousErrorHandling) {\n captureError(error);\n } else {\n // Ideal path, we report this as an unhandled error,\n // which is thrown on a new call stack.\n reportUnhandledError(error);\n }\n}\n\n/**\n * An error handler used when no error handler was supplied\n * to the SafeSubscriber -- meaning no error handler was supplied\n * do the `subscribe` call on our observable.\n * @param err The error to handle\n */\nfunction defaultErrorHandler(err: any) {\n throw err;\n}\n\n/**\n * A handler for notifications that cannot be sent to a stopped subscriber.\n * @param notification The notification being sent\n * @param subscriber The stopped subscriber\n */\nfunction handleStoppedNotification(notification: ObservableNotification, subscriber: Subscriber) {\n const { onStoppedNotification } = config;\n onStoppedNotification && timeoutProvider.setTimeout(() => onStoppedNotification(notification, subscriber));\n}\n\n/**\n * The observer used as a stub for subscriptions where the user did not\n * pass any arguments to `subscribe`. Comes with the default error handling\n * behavior.\n */\nexport const EMPTY_OBSERVER: Readonly> & { closed: true } = {\n closed: true,\n next: noop,\n error: defaultErrorHandler,\n complete: noop,\n};\n", "/**\n * Symbol.observable or a string \"@@observable\". Used for interop\n *\n * @deprecated We will no longer be exporting this symbol in upcoming versions of RxJS.\n * Instead polyfill and use Symbol.observable directly *or* use https://www.npmjs.com/package/symbol-observable\n */\nexport const observable: string | symbol = (() => (typeof Symbol === 'function' && Symbol.observable) || '@@observable')();\n", "/**\n * This function takes one parameter and just returns it. Simply put,\n * this is like `(x: T): T => x`.\n *\n * ## Examples\n *\n * This is useful in some cases when using things like `mergeMap`\n *\n * ```ts\n * import { interval, take, map, range, mergeMap, identity } from 'rxjs';\n *\n * const source$ = interval(1000).pipe(take(5));\n *\n * const result$ = source$.pipe(\n * map(i => range(i)),\n * mergeMap(identity) // same as mergeMap(x => x)\n * );\n *\n * result$.subscribe({\n * next: console.log\n * });\n * ```\n *\n * Or when you want to selectively apply an operator\n *\n * ```ts\n * import { interval, take, identity } from 'rxjs';\n *\n * const shouldLimit = () => Math.random() < 0.5;\n *\n * const source$ = interval(1000);\n *\n * const result$ = source$.pipe(shouldLimit() ? 
take(5) : identity);\n *\n * result$.subscribe({\n * next: console.log\n * });\n * ```\n *\n * @param x Any value that is returned by this function\n * @returns The value passed as the first parameter to this function\n */\nexport function identity(x: T): T {\n return x;\n}\n", "import { identity } from './identity';\nimport { UnaryFunction } from '../types';\n\nexport function pipe(): typeof identity;\nexport function pipe(fn1: UnaryFunction): UnaryFunction;\nexport function pipe(fn1: UnaryFunction, fn2: UnaryFunction): UnaryFunction;\nexport function pipe(fn1: UnaryFunction, fn2: UnaryFunction, fn3: UnaryFunction): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction,\n fn6: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction,\n fn6: UnaryFunction,\n fn7: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction,\n fn6: UnaryFunction,\n fn7: UnaryFunction,\n fn8: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction,\n fn6: UnaryFunction,\n fn7: UnaryFunction,\n fn8: UnaryFunction,\n fn9: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction,\n fn6: UnaryFunction,\n fn7: UnaryFunction,\n fn8: UnaryFunction,\n fn9: UnaryFunction,\n ...fns: UnaryFunction[]\n): UnaryFunction;\n\n/**\n * pipe() can be called on one or more functions, each of which can take one argument (\"UnaryFunction\")\n * and uses it to return a value.\n * It returns a function that takes one argument, passes it to the first UnaryFunction, and then\n * passes the result to the next one, passes that result to the next one, and so on. \n */\nexport function pipe(...fns: Array>): UnaryFunction {\n return pipeFromArray(fns);\n}\n\n/** @internal */\nexport function pipeFromArray(fns: Array>): UnaryFunction {\n if (fns.length === 0) {\n return identity as UnaryFunction;\n }\n\n if (fns.length === 1) {\n return fns[0];\n }\n\n return function piped(input: T): R {\n return fns.reduce((prev: any, fn: UnaryFunction) => fn(prev), input as any);\n };\n}\n", "import { Operator } from './Operator';\nimport { SafeSubscriber, Subscriber } from './Subscriber';\nimport { isSubscription, Subscription } from './Subscription';\nimport { TeardownLogic, OperatorFunction, Subscribable, Observer } from './types';\nimport { observable as Symbol_observable } from './symbol/observable';\nimport { pipeFromArray } from './util/pipe';\nimport { config } from './config';\nimport { isFunction } from './util/isFunction';\nimport { errorContext } from './util/errorContext';\n\n/**\n * A representation of any set of values over any amount of time. This is the most basic building block\n * of RxJS.\n *\n * @class Observable\n */\nexport class Observable implements Subscribable {\n /**\n * @deprecated Internal implementation detail, do not use directly. 
Will be made internal in v8.\n */\n source: Observable | undefined;\n\n /**\n * @deprecated Internal implementation detail, do not use directly. Will be made internal in v8.\n */\n operator: Operator | undefined;\n\n /**\n * @constructor\n * @param {Function} subscribe the function that is called when the Observable is\n * initially subscribed to. This function is given a Subscriber, to which new values\n * can be `next`ed, or an `error` method can be called to raise an error, or\n * `complete` can be called to notify of a successful completion.\n */\n constructor(subscribe?: (this: Observable, subscriber: Subscriber) => TeardownLogic) {\n if (subscribe) {\n this._subscribe = subscribe;\n }\n }\n\n // HACK: Since TypeScript inherits static properties too, we have to\n // fight against TypeScript here so Subject can have a different static create signature\n /**\n * Creates a new Observable by calling the Observable constructor\n * @owner Observable\n * @method create\n * @param {Function} subscribe? the subscriber function to be passed to the Observable constructor\n * @return {Observable} a new observable\n * @nocollapse\n * @deprecated Use `new Observable()` instead. Will be removed in v8.\n */\n static create: (...args: any[]) => any = (subscribe?: (subscriber: Subscriber) => TeardownLogic) => {\n return new Observable(subscribe);\n };\n\n /**\n * Creates a new Observable, with this Observable instance as the source, and the passed\n * operator defined as the new observable's operator.\n * @method lift\n * @param operator the operator defining the operation to take on the observable\n * @return a new observable with the Operator applied\n * @deprecated Internal implementation detail, do not use directly. Will be made internal in v8.\n * If you have implemented an operator using `lift`, it is recommended that you create an\n * operator by simply returning `new Observable()` directly. See \"Creating new operators from\n * scratch\" section here: https://rxjs.dev/guide/operators\n */\n lift(operator?: Operator): Observable {\n const observable = new Observable();\n observable.source = this;\n observable.operator = operator;\n return observable;\n }\n\n subscribe(observerOrNext?: Partial> | ((value: T) => void)): Subscription;\n /** @deprecated Instead of passing separate callback arguments, use an observer argument. Signatures taking separate callback arguments will be removed in v8. Details: https://rxjs.dev/deprecations/subscribe-arguments */\n subscribe(next?: ((value: T) => void) | null, error?: ((error: any) => void) | null, complete?: (() => void) | null): Subscription;\n /**\n * Invokes an execution of an Observable and registers Observer handlers for notifications it will emit.\n *\n * Use it when you have all these Observables, but still nothing is happening.\n *\n * `subscribe` is not a regular operator, but a method that calls Observable's internal `subscribe` function. It\n * might be for example a function that you passed to Observable's constructor, but most of the time it is\n * a library implementation, which defines what will be emitted by an Observable, and when it be will emitted. This means\n * that calling `subscribe` is actually the moment when Observable starts its work, not when it is created, as it is often\n * the thought.\n *\n * Apart from starting the execution of an Observable, this method allows you to listen for values\n * that an Observable emits, as well as for when it completes or errors. 
You can achieve this in two\n * of the following ways.\n *\n * The first way is creating an object that implements {@link Observer} interface. It should have methods\n * defined by that interface, but note that it should be just a regular JavaScript object, which you can create\n * yourself in any way you want (ES6 class, classic function constructor, object literal etc.). In particular, do\n * not attempt to use any RxJS implementation details to create Observers - you don't need them. Remember also\n * that your object does not have to implement all methods. If you find yourself creating a method that doesn't\n * do anything, you can simply omit it. Note however, if the `error` method is not provided and an error happens,\n * it will be thrown asynchronously. Errors thrown asynchronously cannot be caught using `try`/`catch`. Instead,\n * use the {@link onUnhandledError} configuration option or use a runtime handler (like `window.onerror` or\n * `process.on('error)`) to be notified of unhandled errors. Because of this, it's recommended that you provide\n * an `error` method to avoid missing thrown errors.\n *\n * The second way is to give up on Observer object altogether and simply provide callback functions in place of its methods.\n * This means you can provide three functions as arguments to `subscribe`, where the first function is equivalent\n * of a `next` method, the second of an `error` method and the third of a `complete` method. Just as in case of an Observer,\n * if you do not need to listen for something, you can omit a function by passing `undefined` or `null`,\n * since `subscribe` recognizes these functions by where they were placed in function call. When it comes\n * to the `error` function, as with an Observer, if not provided, errors emitted by an Observable will be thrown asynchronously.\n *\n * You can, however, subscribe with no parameters at all. This may be the case where you're not interested in terminal events\n * and you also handled emissions internally by using operators (e.g. using `tap`).\n *\n * Whichever style of calling `subscribe` you use, in both cases it returns a Subscription object.\n * This object allows you to call `unsubscribe` on it, which in turn will stop the work that an Observable does and will clean\n * up all resources that an Observable used. Note that cancelling a subscription will not call `complete` callback\n * provided to `subscribe` function, which is reserved for a regular completion signal that comes from an Observable.\n *\n * Remember that callbacks provided to `subscribe` are not guaranteed to be called asynchronously.\n * It is an Observable itself that decides when these functions will be called. For example {@link of}\n * by default emits all its values synchronously. 
Always check documentation for how given Observable\n * will behave when subscribed and if its default behavior can be modified with a `scheduler`.\n *\n * #### Examples\n *\n * Subscribe with an {@link guide/observer Observer}\n *\n * ```ts\n * import { of } from 'rxjs';\n *\n * const sumObserver = {\n * sum: 0,\n * next(value) {\n * console.log('Adding: ' + value);\n * this.sum = this.sum + value;\n * },\n * error() {\n * // We actually could just remove this method,\n * // since we do not really care about errors right now.\n * },\n * complete() {\n * console.log('Sum equals: ' + this.sum);\n * }\n * };\n *\n * of(1, 2, 3) // Synchronously emits 1, 2, 3 and then completes.\n * .subscribe(sumObserver);\n *\n * // Logs:\n * // 'Adding: 1'\n * // 'Adding: 2'\n * // 'Adding: 3'\n * // 'Sum equals: 6'\n * ```\n *\n * Subscribe with functions ({@link deprecations/subscribe-arguments deprecated})\n *\n * ```ts\n * import { of } from 'rxjs'\n *\n * let sum = 0;\n *\n * of(1, 2, 3).subscribe(\n * value => {\n * console.log('Adding: ' + value);\n * sum = sum + value;\n * },\n * undefined,\n * () => console.log('Sum equals: ' + sum)\n * );\n *\n * // Logs:\n * // 'Adding: 1'\n * // 'Adding: 2'\n * // 'Adding: 3'\n * // 'Sum equals: 6'\n * ```\n *\n * Cancel a subscription\n *\n * ```ts\n * import { interval } from 'rxjs';\n *\n * const subscription = interval(1000).subscribe({\n * next(num) {\n * console.log(num)\n * },\n * complete() {\n * // Will not be called, even when cancelling subscription.\n * console.log('completed!');\n * }\n * });\n *\n * setTimeout(() => {\n * subscription.unsubscribe();\n * console.log('unsubscribed!');\n * }, 2500);\n *\n * // Logs:\n * // 0 after 1s\n * // 1 after 2s\n * // 'unsubscribed!' after 2.5s\n * ```\n *\n * @param {Observer|Function} observerOrNext (optional) Either an observer with methods to be called,\n * or the first of three possible handlers, which is the handler for each value emitted from the subscribed\n * Observable.\n * @param {Function} error (optional) A handler for a terminal event resulting from an error. If no error handler is provided,\n * the error will be thrown asynchronously as unhandled.\n * @param {Function} complete (optional) A handler for a terminal event resulting from successful completion.\n * @return {Subscription} a subscription reference to the registered handlers\n * @method subscribe\n */\n subscribe(\n observerOrNext?: Partial> | ((value: T) => void) | null,\n error?: ((error: any) => void) | null,\n complete?: (() => void) | null\n ): Subscription {\n const subscriber = isSubscriber(observerOrNext) ? observerOrNext : new SafeSubscriber(observerOrNext, error, complete);\n\n errorContext(() => {\n const { operator, source } = this;\n subscriber.add(\n operator\n ? // We're dealing with a subscription in the\n // operator chain to one of our lifted operators.\n operator.call(subscriber, source)\n : source\n ? // If `source` has a value, but `operator` does not, something that\n // had intimate knowledge of our API, like our `Subject`, must have\n // set it. 
We're going to just call `_subscribe` directly.\n this._subscribe(subscriber)\n : // In all other cases, we're likely wrapping a user-provided initializer\n // function, so we need to catch errors and handle them appropriately.\n this._trySubscribe(subscriber)\n );\n });\n\n return subscriber;\n }\n\n /** @internal */\n protected _trySubscribe(sink: Subscriber): TeardownLogic {\n try {\n return this._subscribe(sink);\n } catch (err) {\n // We don't need to return anything in this case,\n // because it's just going to try to `add()` to a subscription\n // above.\n sink.error(err);\n }\n }\n\n /**\n * Used as a NON-CANCELLABLE means of subscribing to an observable, for use with\n * APIs that expect promises, like `async/await`. You cannot unsubscribe from this.\n *\n * **WARNING**: Only use this with observables you *know* will complete. If the source\n * observable does not complete, you will end up with a promise that is hung up, and\n * potentially all of the state of an async function hanging out in memory. To avoid\n * this situation, look into adding something like {@link timeout}, {@link take},\n * {@link takeWhile}, or {@link takeUntil} amongst others.\n *\n * #### Example\n *\n * ```ts\n * import { interval, take } from 'rxjs';\n *\n * const source$ = interval(1000).pipe(take(4));\n *\n * async function getTotal() {\n * let total = 0;\n *\n * await source$.forEach(value => {\n * total += value;\n * console.log('observable -> ' + value);\n * });\n *\n * return total;\n * }\n *\n * getTotal().then(\n * total => console.log('Total: ' + total)\n * );\n *\n * // Expected:\n * // 'observable -> 0'\n * // 'observable -> 1'\n * // 'observable -> 2'\n * // 'observable -> 3'\n * // 'Total: 6'\n * ```\n *\n * @param next a handler for each value emitted by the observable\n * @return a promise that either resolves on observable completion or\n * rejects with the handled error\n */\n forEach(next: (value: T) => void): Promise;\n\n /**\n * @param next a handler for each value emitted by the observable\n * @param promiseCtor a constructor function used to instantiate the Promise\n * @return a promise that either resolves on observable completion or\n * rejects with the handled error\n * @deprecated Passing a Promise constructor will no longer be available\n * in upcoming versions of RxJS. This is because it adds weight to the library, for very\n * little benefit. If you need this functionality, it is recommended that you either\n * polyfill Promise, or you create an adapter to convert the returned native promise\n * to whatever promise implementation you wanted. 
Will be removed in v8.\n */\n forEach(next: (value: T) => void, promiseCtor: PromiseConstructorLike): Promise;\n\n forEach(next: (value: T) => void, promiseCtor?: PromiseConstructorLike): Promise {\n promiseCtor = getPromiseCtor(promiseCtor);\n\n return new promiseCtor((resolve, reject) => {\n const subscriber = new SafeSubscriber({\n next: (value) => {\n try {\n next(value);\n } catch (err) {\n reject(err);\n subscriber.unsubscribe();\n }\n },\n error: reject,\n complete: resolve,\n });\n this.subscribe(subscriber);\n }) as Promise;\n }\n\n /** @internal */\n protected _subscribe(subscriber: Subscriber): TeardownLogic {\n return this.source?.subscribe(subscriber);\n }\n\n /**\n * An interop point defined by the es7-observable spec /~https://github.com/zenparsing/es-observable\n * @method Symbol.observable\n * @return {Observable} this instance of the observable\n */\n [Symbol_observable]() {\n return this;\n }\n\n /* tslint:disable:max-line-length */\n pipe(): Observable;\n pipe(op1: OperatorFunction): Observable;\n pipe(op1: OperatorFunction, op2: OperatorFunction): Observable;\n pipe(op1: OperatorFunction, op2: OperatorFunction, op3: OperatorFunction): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction,\n op6: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction,\n op6: OperatorFunction,\n op7: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction,\n op6: OperatorFunction,\n op7: OperatorFunction,\n op8: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction,\n op6: OperatorFunction,\n op7: OperatorFunction,\n op8: OperatorFunction,\n op9: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction,\n op6: OperatorFunction,\n op7: OperatorFunction,\n op8: OperatorFunction,\n op9: OperatorFunction,\n ...operations: OperatorFunction[]\n ): Observable;\n /* tslint:enable:max-line-length */\n\n /**\n * Used to stitch together functional operators into a chain.\n * @method pipe\n * @return {Observable} the Observable result of all of the operators having\n * been called in the order they were passed in.\n *\n * ## Example\n *\n * ```ts\n * import { interval, filter, map, scan } from 'rxjs';\n *\n * interval(1000)\n * .pipe(\n * filter(x => x % 2 === 0),\n * map(x => x + x),\n * scan((acc, x) => acc + x)\n * )\n * .subscribe(x => console.log(x));\n * ```\n */\n pipe(...operations: OperatorFunction[]): Observable {\n return pipeFromArray(operations)(this);\n }\n\n /* tslint:disable:max-line-length */\n /** @deprecated Replaced with {@link firstValueFrom} and {@link lastValueFrom}. Will be removed in v8. Details: https://rxjs.dev/deprecations/to-promise */\n toPromise(): Promise;\n /** @deprecated Replaced with {@link firstValueFrom} and {@link lastValueFrom}. Will be removed in v8. 
Details: https://rxjs.dev/deprecations/to-promise */\n toPromise(PromiseCtor: typeof Promise): Promise;\n /** @deprecated Replaced with {@link firstValueFrom} and {@link lastValueFrom}. Will be removed in v8. Details: https://rxjs.dev/deprecations/to-promise */\n toPromise(PromiseCtor: PromiseConstructorLike): Promise;\n /* tslint:enable:max-line-length */\n\n /**\n * Subscribe to this Observable and get a Promise resolving on\n * `complete` with the last emission (if any).\n *\n * **WARNING**: Only use this with observables you *know* will complete. If the source\n * observable does not complete, you will end up with a promise that is hung up, and\n * potentially all of the state of an async function hanging out in memory. To avoid\n * this situation, look into adding something like {@link timeout}, {@link take},\n * {@link takeWhile}, or {@link takeUntil} amongst others.\n *\n * @method toPromise\n * @param [promiseCtor] a constructor function used to instantiate\n * the Promise\n * @return A Promise that resolves with the last value emit, or\n * rejects on an error. If there were no emissions, Promise\n * resolves with undefined.\n * @deprecated Replaced with {@link firstValueFrom} and {@link lastValueFrom}. Will be removed in v8. Details: https://rxjs.dev/deprecations/to-promise\n */\n toPromise(promiseCtor?: PromiseConstructorLike): Promise {\n promiseCtor = getPromiseCtor(promiseCtor);\n\n return new promiseCtor((resolve, reject) => {\n let value: T | undefined;\n this.subscribe(\n (x: T) => (value = x),\n (err: any) => reject(err),\n () => resolve(value)\n );\n }) as Promise;\n }\n}\n\n/**\n * Decides between a passed promise constructor from consuming code,\n * A default configured promise constructor, and the native promise\n * constructor and returns it. If nothing can be found, it will throw\n * an error.\n * @param promiseCtor The optional promise constructor to passed by consuming code\n */\nfunction getPromiseCtor(promiseCtor: PromiseConstructorLike | undefined) {\n return promiseCtor ?? config.Promise ?? Promise;\n}\n\nfunction isObserver(value: any): value is Observer {\n return value && isFunction(value.next) && isFunction(value.error) && isFunction(value.complete);\n}\n\nfunction isSubscriber(value: any): value is Subscriber {\n return (value && value instanceof Subscriber) || (isObserver(value) && isSubscription(value));\n}\n", "import { Observable } from '../Observable';\nimport { Subscriber } from '../Subscriber';\nimport { OperatorFunction } from '../types';\nimport { isFunction } from './isFunction';\n\n/**\n * Used to determine if an object is an Observable with a lift function.\n */\nexport function hasLift(source: any): source is { lift: InstanceType['lift'] } {\n return isFunction(source?.lift);\n}\n\n/**\n * Creates an `OperatorFunction`. 
Used to define operators throughout the library in a concise way.\n * @param init The logic to connect the liftedSource to the subscriber at the moment of subscription.\n */\nexport function operate(\n init: (liftedSource: Observable, subscriber: Subscriber) => (() => void) | void\n): OperatorFunction {\n return (source: Observable) => {\n if (hasLift(source)) {\n return source.lift(function (this: Subscriber, liftedSource: Observable) {\n try {\n return init(liftedSource, this);\n } catch (err) {\n this.error(err);\n }\n });\n }\n throw new TypeError('Unable to lift unknown Observable type');\n };\n}\n", "import { Subscriber } from '../Subscriber';\n\n/**\n * Creates an instance of an `OperatorSubscriber`.\n * @param destination The downstream subscriber.\n * @param onNext Handles next values, only called if this subscriber is not stopped or closed. Any\n * error that occurs in this function is caught and sent to the `error` method of this subscriber.\n * @param onError Handles errors from the subscription, any errors that occur in this handler are caught\n * and send to the `destination` error handler.\n * @param onComplete Handles completion notification from the subscription. Any errors that occur in\n * this handler are sent to the `destination` error handler.\n * @param onFinalize Additional teardown logic here. This will only be called on teardown if the\n * subscriber itself is not already closed. This is called after all other teardown logic is executed.\n */\nexport function createOperatorSubscriber(\n destination: Subscriber,\n onNext?: (value: T) => void,\n onComplete?: () => void,\n onError?: (err: any) => void,\n onFinalize?: () => void\n): Subscriber {\n return new OperatorSubscriber(destination, onNext, onComplete, onError, onFinalize);\n}\n\n/**\n * A generic helper for allowing operators to be created with a Subscriber and\n * use closures to capture necessary state from the operator function itself.\n */\nexport class OperatorSubscriber extends Subscriber {\n /**\n * Creates an instance of an `OperatorSubscriber`.\n * @param destination The downstream subscriber.\n * @param onNext Handles next values, only called if this subscriber is not stopped or closed. Any\n * error that occurs in this function is caught and sent to the `error` method of this subscriber.\n * @param onError Handles errors from the subscription, any errors that occur in this handler are caught\n * and send to the `destination` error handler.\n * @param onComplete Handles completion notification from the subscription. Any errors that occur in\n * this handler are sent to the `destination` error handler.\n * @param onFinalize Additional finalization logic here. This will only be called on finalization if the\n * subscriber itself is not already closed. This is called after all other finalization logic is executed.\n * @param shouldUnsubscribe An optional check to see if an unsubscribe call should truly unsubscribe.\n * NOTE: This currently **ONLY** exists to support the strange behavior of {@link groupBy}, where unsubscription\n * to the resulting observable does not actually disconnect from the source if there are active subscriptions\n * to any grouped observable. 
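To make the intent of `operate` and `createOperatorSubscriber` concrete, here is a rough sketch of how an in-library operator can be assembled from them; the relative import paths are assumptions that place the file alongside the other operators, and `double` is purely illustrative:

```ts
import { OperatorFunction } from '../types';
import { operate } from '../util/lift';
import { createOperatorSubscriber } from './OperatorSubscriber';

/** A toy operator that doubles every value (illustrative only). */
export function double(): OperatorFunction<number, number> {
  return operate<number, number>((source, subscriber) => {
    // Subscribe to the lifted source with an OperatorSubscriber that
    // forwards transformed values; errors and completion are mirrored
    // downstream by the subscriber's default handlers.
    source.subscribe(
      createOperatorSubscriber(subscriber, (value) => subscriber.next(value * 2))
    );
  });
}
```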
(DO NOT EXPOSE OR USE EXTERNALLY!!!)\n */\n constructor(\n destination: Subscriber,\n onNext?: (value: T) => void,\n onComplete?: () => void,\n onError?: (err: any) => void,\n private onFinalize?: () => void,\n private shouldUnsubscribe?: () => boolean\n ) {\n // It's important - for performance reasons - that all of this class's\n // members are initialized and that they are always initialized in the same\n // order. This will ensure that all OperatorSubscriber instances have the\n // same hidden class in V8. This, in turn, will help keep the number of\n // hidden classes involved in property accesses within the base class as\n // low as possible. If the number of hidden classes involved exceeds four,\n // the property accesses will become megamorphic and performance penalties\n // will be incurred - i.e. inline caches won't be used.\n //\n // The reasons for ensuring all instances have the same hidden class are\n // further discussed in this blog post from Benedikt Meurer:\n // https://benediktmeurer.de/2018/03/23/impact-of-polymorphism-on-component-based-frameworks-like-react/\n super(destination);\n this._next = onNext\n ? function (this: OperatorSubscriber, value: T) {\n try {\n onNext(value);\n } catch (err) {\n destination.error(err);\n }\n }\n : super._next;\n this._error = onError\n ? function (this: OperatorSubscriber, err: any) {\n try {\n onError(err);\n } catch (err) {\n // Send any errors that occur down stream.\n destination.error(err);\n } finally {\n // Ensure finalization.\n this.unsubscribe();\n }\n }\n : super._error;\n this._complete = onComplete\n ? function (this: OperatorSubscriber) {\n try {\n onComplete();\n } catch (err) {\n // Send any errors that occur down stream.\n destination.error(err);\n } finally {\n // Ensure finalization.\n this.unsubscribe();\n }\n }\n : super._complete;\n }\n\n unsubscribe() {\n if (!this.shouldUnsubscribe || this.shouldUnsubscribe()) {\n const { closed } = this;\n super.unsubscribe();\n // Execute additional teardown if we have any and we didn't already do so.\n !closed && this.onFinalize?.();\n }\n }\n}\n", "import { Subscription } from '../Subscription';\n\ninterface AnimationFrameProvider {\n schedule(callback: FrameRequestCallback): Subscription;\n requestAnimationFrame: typeof requestAnimationFrame;\n cancelAnimationFrame: typeof cancelAnimationFrame;\n delegate:\n | {\n requestAnimationFrame: typeof requestAnimationFrame;\n cancelAnimationFrame: typeof cancelAnimationFrame;\n }\n | undefined;\n}\n\nexport const animationFrameProvider: AnimationFrameProvider = {\n // When accessing the delegate, use the variable rather than `this` so that\n // the functions can be called without being bound to the provider.\n schedule(callback) {\n let request = requestAnimationFrame;\n let cancel: typeof cancelAnimationFrame | undefined = cancelAnimationFrame;\n const { delegate } = animationFrameProvider;\n if (delegate) {\n request = delegate.requestAnimationFrame;\n cancel = delegate.cancelAnimationFrame;\n }\n const handle = request((timestamp) => {\n // Clear the cancel function. 
The request has been fulfilled, so\n // attempting to cancel the request upon unsubscription would be\n // pointless.\n cancel = undefined;\n callback(timestamp);\n });\n return new Subscription(() => cancel?.(handle));\n },\n requestAnimationFrame(...args) {\n const { delegate } = animationFrameProvider;\n return (delegate?.requestAnimationFrame || requestAnimationFrame)(...args);\n },\n cancelAnimationFrame(...args) {\n const { delegate } = animationFrameProvider;\n return (delegate?.cancelAnimationFrame || cancelAnimationFrame)(...args);\n },\n delegate: undefined,\n};\n", "import { createErrorClass } from './createErrorClass';\n\nexport interface ObjectUnsubscribedError extends Error {}\n\nexport interface ObjectUnsubscribedErrorCtor {\n /**\n * @deprecated Internal implementation detail. Do not construct error instances.\n * Cannot be tagged as internal: /~https://github.com/ReactiveX/rxjs/issues/6269\n */\n new (): ObjectUnsubscribedError;\n}\n\n/**\n * An error thrown when an action is invalid because the object has been\n * unsubscribed.\n *\n * @see {@link Subject}\n * @see {@link BehaviorSubject}\n *\n * @class ObjectUnsubscribedError\n */\nexport const ObjectUnsubscribedError: ObjectUnsubscribedErrorCtor = createErrorClass(\n (_super) =>\n function ObjectUnsubscribedErrorImpl(this: any) {\n _super(this);\n this.name = 'ObjectUnsubscribedError';\n this.message = 'object unsubscribed';\n }\n);\n", "import { Operator } from './Operator';\nimport { Observable } from './Observable';\nimport { Subscriber } from './Subscriber';\nimport { Subscription, EMPTY_SUBSCRIPTION } from './Subscription';\nimport { Observer, SubscriptionLike, TeardownLogic } from './types';\nimport { ObjectUnsubscribedError } from './util/ObjectUnsubscribedError';\nimport { arrRemove } from './util/arrRemove';\nimport { errorContext } from './util/errorContext';\n\n/**\n * A Subject is a special type of Observable that allows values to be\n * multicasted to many Observers. Subjects are like EventEmitters.\n *\n * Every Subject is an Observable and an Observer. You can subscribe to a\n * Subject, and you can call next to feed values as well as error and complete.\n */\nexport class Subject extends Observable implements SubscriptionLike {\n closed = false;\n\n private currentObservers: Observer[] | null = null;\n\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n observers: Observer[] = [];\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n isStopped = false;\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n hasError = false;\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n thrownError: any = null;\n\n /**\n * Creates a \"subject\" by basically gluing an observer to an observable.\n *\n * @nocollapse\n * @deprecated Recommended you do not use. Will be removed at some point in the future. Plans for replacement still under discussion.\n */\n static create: (...args: any[]) => any = (destination: Observer, source: Observable): AnonymousSubject => {\n return new AnonymousSubject(destination, source);\n };\n\n constructor() {\n // NOTE: This must be here to obscure Observable's constructor.\n super();\n }\n\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. 
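A short usage sketch of the multicasting behaviour described above:

```ts
import { Subject } from 'rxjs';

const subject = new Subject<number>();

// Both observers are registered before values are pushed, so each
// `next` call is multicast to both of them.
subject.subscribe(v => console.log('observer A:', v));
subject.subscribe(v => console.log('observer B:', v));

subject.next(1);
subject.next(2);
subject.complete();

// Logs:
// observer A: 1
// observer B: 1
// observer A: 2
// observer B: 2
```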
*/\n lift(operator: Operator): Observable {\n const subject = new AnonymousSubject(this, this);\n subject.operator = operator as any;\n return subject as any;\n }\n\n /** @internal */\n protected _throwIfClosed() {\n if (this.closed) {\n throw new ObjectUnsubscribedError();\n }\n }\n\n next(value: T) {\n errorContext(() => {\n this._throwIfClosed();\n if (!this.isStopped) {\n if (!this.currentObservers) {\n this.currentObservers = Array.from(this.observers);\n }\n for (const observer of this.currentObservers) {\n observer.next(value);\n }\n }\n });\n }\n\n error(err: any) {\n errorContext(() => {\n this._throwIfClosed();\n if (!this.isStopped) {\n this.hasError = this.isStopped = true;\n this.thrownError = err;\n const { observers } = this;\n while (observers.length) {\n observers.shift()!.error(err);\n }\n }\n });\n }\n\n complete() {\n errorContext(() => {\n this._throwIfClosed();\n if (!this.isStopped) {\n this.isStopped = true;\n const { observers } = this;\n while (observers.length) {\n observers.shift()!.complete();\n }\n }\n });\n }\n\n unsubscribe() {\n this.isStopped = this.closed = true;\n this.observers = this.currentObservers = null!;\n }\n\n get observed() {\n return this.observers?.length > 0;\n }\n\n /** @internal */\n protected _trySubscribe(subscriber: Subscriber): TeardownLogic {\n this._throwIfClosed();\n return super._trySubscribe(subscriber);\n }\n\n /** @internal */\n protected _subscribe(subscriber: Subscriber): Subscription {\n this._throwIfClosed();\n this._checkFinalizedStatuses(subscriber);\n return this._innerSubscribe(subscriber);\n }\n\n /** @internal */\n protected _innerSubscribe(subscriber: Subscriber) {\n const { hasError, isStopped, observers } = this;\n if (hasError || isStopped) {\n return EMPTY_SUBSCRIPTION;\n }\n this.currentObservers = null;\n observers.push(subscriber);\n return new Subscription(() => {\n this.currentObservers = null;\n arrRemove(observers, subscriber);\n });\n }\n\n /** @internal */\n protected _checkFinalizedStatuses(subscriber: Subscriber) {\n const { hasError, thrownError, isStopped } = this;\n if (hasError) {\n subscriber.error(thrownError);\n } else if (isStopped) {\n subscriber.complete();\n }\n }\n\n /**\n * Creates a new Observable with this Subject as the source. You can do this\n * to create custom Observer-side logic of the Subject and conceal it from\n * code that uses the Observable.\n * @return {Observable} Observable that the Subject casts to\n */\n asObservable(): Observable {\n const observable: any = new Observable();\n observable.source = this;\n return observable;\n }\n}\n\n/**\n * @class AnonymousSubject\n */\nexport class AnonymousSubject extends Subject {\n constructor(\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n public destination?: Observer,\n source?: Observable\n ) {\n super();\n this.source = source;\n }\n\n next(value: T) {\n this.destination?.next?.(value);\n }\n\n error(err: any) {\n this.destination?.error?.(err);\n }\n\n complete() {\n this.destination?.complete?.();\n }\n\n /** @internal */\n protected _subscribe(subscriber: Subscriber): Subscription {\n return this.source?.subscribe(subscriber) ?? 
EMPTY_SUBSCRIPTION;\n }\n}\n", "import { Subject } from './Subject';\nimport { Subscriber } from './Subscriber';\nimport { Subscription } from './Subscription';\n\n/**\n * A variant of Subject that requires an initial value and emits its current\n * value whenever it is subscribed to.\n *\n * @class BehaviorSubject\n */\nexport class BehaviorSubject extends Subject {\n constructor(private _value: T) {\n super();\n }\n\n get value(): T {\n return this.getValue();\n }\n\n /** @internal */\n protected _subscribe(subscriber: Subscriber): Subscription {\n const subscription = super._subscribe(subscriber);\n !subscription.closed && subscriber.next(this._value);\n return subscription;\n }\n\n getValue(): T {\n const { hasError, thrownError, _value } = this;\n if (hasError) {\n throw thrownError;\n }\n this._throwIfClosed();\n return _value;\n }\n\n next(value: T): void {\n super.next((this._value = value));\n }\n}\n", "import { TimestampProvider } from '../types';\n\ninterface DateTimestampProvider extends TimestampProvider {\n delegate: TimestampProvider | undefined;\n}\n\nexport const dateTimestampProvider: DateTimestampProvider = {\n now() {\n // Use the variable rather than `this` so that the function can be called\n // without being bound to the provider.\n return (dateTimestampProvider.delegate || Date).now();\n },\n delegate: undefined,\n};\n", "import { Subject } from './Subject';\nimport { TimestampProvider } from './types';\nimport { Subscriber } from './Subscriber';\nimport { Subscription } from './Subscription';\nimport { dateTimestampProvider } from './scheduler/dateTimestampProvider';\n\n/**\n * A variant of {@link Subject} that \"replays\" old values to new subscribers by emitting them when they first subscribe.\n *\n * `ReplaySubject` has an internal buffer that will store a specified number of values that it has observed. Like `Subject`,\n * `ReplaySubject` \"observes\" values by having them passed to its `next` method. When it observes a value, it will store that\n * value for a time determined by the configuration of the `ReplaySubject`, as passed to its constructor.\n *\n * When a new subscriber subscribes to the `ReplaySubject` instance, it will synchronously emit all values in its buffer in\n * a First-In-First-Out (FIFO) manner. The `ReplaySubject` will also complete, if it has observed completion; and it will\n * error if it has observed an error.\n *\n * There are two main configuration items to be concerned with:\n *\n * 1. `bufferSize` - This will determine how many items are stored in the buffer, defaults to infinite.\n * 2. `windowTime` - The amount of time to hold a value in the buffer before removing it from the buffer.\n *\n * Both configurations may exist simultaneously. So if you would like to buffer a maximum of 3 values, as long as the values\n * are less than 2 seconds old, you could do so with a `new ReplaySubject(3, 2000)`.\n *\n * ### Differences with BehaviorSubject\n *\n * `BehaviorSubject` is similar to `new ReplaySubject(1)`, with a couple of exceptions:\n *\n * 1. `BehaviorSubject` comes \"primed\" with a single value upon construction.\n * 2. 
`ReplaySubject` will replay values, even after observing an error, where `BehaviorSubject` will not.\n *\n * @see {@link Subject}\n * @see {@link BehaviorSubject}\n * @see {@link shareReplay}\n */\nexport class ReplaySubject extends Subject {\n private _buffer: (T | number)[] = [];\n private _infiniteTimeWindow = true;\n\n /**\n * @param bufferSize The size of the buffer to replay on subscription\n * @param windowTime The amount of time the buffered items will stay buffered\n * @param timestampProvider An object with a `now()` method that provides the current timestamp. This is used to\n * calculate the amount of time something has been buffered.\n */\n constructor(\n private _bufferSize = Infinity,\n private _windowTime = Infinity,\n private _timestampProvider: TimestampProvider = dateTimestampProvider\n ) {\n super();\n this._infiniteTimeWindow = _windowTime === Infinity;\n this._bufferSize = Math.max(1, _bufferSize);\n this._windowTime = Math.max(1, _windowTime);\n }\n\n next(value: T): void {\n const { isStopped, _buffer, _infiniteTimeWindow, _timestampProvider, _windowTime } = this;\n if (!isStopped) {\n _buffer.push(value);\n !_infiniteTimeWindow && _buffer.push(_timestampProvider.now() + _windowTime);\n }\n this._trimBuffer();\n super.next(value);\n }\n\n /** @internal */\n protected _subscribe(subscriber: Subscriber): Subscription {\n this._throwIfClosed();\n this._trimBuffer();\n\n const subscription = this._innerSubscribe(subscriber);\n\n const { _infiniteTimeWindow, _buffer } = this;\n // We use a copy here, so reentrant code does not mutate our array while we're\n // emitting it to a new subscriber.\n const copy = _buffer.slice();\n for (let i = 0; i < copy.length && !subscriber.closed; i += _infiniteTimeWindow ? 1 : 2) {\n subscriber.next(copy[i] as T);\n }\n\n this._checkFinalizedStatuses(subscriber);\n\n return subscription;\n }\n\n private _trimBuffer() {\n const { _bufferSize, _timestampProvider, _buffer, _infiniteTimeWindow } = this;\n // If we don't have an infinite buffer size, and we're over the length,\n // use splice to truncate the old buffer values off. Note that we have to\n // double the size for instances where we're not using an infinite time window\n // because we're storing the values and the timestamps in the same array.\n const adjustedBufferSize = (_infiniteTimeWindow ? 1 : 2) * _bufferSize;\n _bufferSize < Infinity && adjustedBufferSize < _buffer.length && _buffer.splice(0, _buffer.length - adjustedBufferSize);\n\n // Now, if we're not in an infinite time window, remove all values where the time is\n // older than what is allowed.\n if (!_infiniteTimeWindow) {\n const now = _timestampProvider.now();\n let last = 0;\n // Search the array for the first timestamp that isn't expired and\n // truncate the buffer up to that point.\n for (let i = 1; i < _buffer.length && (_buffer[i] as number) <= now; i += 2) {\n last = i;\n }\n last && _buffer.splice(0, last + 1);\n }\n }\n}\n", "import { Scheduler } from '../Scheduler';\nimport { Subscription } from '../Subscription';\nimport { SchedulerAction } from '../types';\n\n/**\n * A unit of work to be executed in a `scheduler`. 
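A brief sketch of the buffering behaviour and the `BehaviorSubject` comparison described above:

```ts
import { BehaviorSubject, ReplaySubject } from 'rxjs';

// Keep at most the 2 most recent values in the buffer.
const replay$ = new ReplaySubject<number>(2);
replay$.next(1);
replay$.next(2);
replay$.next(3);

// A late subscriber synchronously receives the buffered 2 and 3.
replay$.subscribe(v => console.log('replay:', v));

// BehaviorSubject is roughly `new ReplaySubject(1)`, except that it must be
// primed with an initial value and exposes the current one via `.value`.
const behavior$ = new BehaviorSubject<number>(0);
console.log('current:', behavior$.value); // current: 0
behavior$.next(42);
behavior$.subscribe(v => console.log('behavior:', v)); // behavior: 42
```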
An action is typically\n * created from within a {@link SchedulerLike} and an RxJS user does not need to concern\n * themselves about creating and manipulating an Action.\n *\n * ```ts\n * class Action extends Subscription {\n * new (scheduler: Scheduler, work: (state?: T) => void);\n * schedule(state?: T, delay: number = 0): Subscription;\n * }\n * ```\n *\n * @class Action\n */\nexport class Action extends Subscription {\n constructor(scheduler: Scheduler, work: (this: SchedulerAction, state?: T) => void) {\n super();\n }\n /**\n * Schedules this action on its parent {@link SchedulerLike} for execution. May be passed\n * some context object, `state`. May happen at some point in the future,\n * according to the `delay` parameter, if specified.\n * @param {T} [state] Some contextual data that the `work` function uses when\n * called by the Scheduler.\n * @param {number} [delay] Time to wait before executing the work, where the\n * time unit is implicit and defined by the Scheduler.\n * @return {void}\n */\n public schedule(state?: T, delay: number = 0): Subscription {\n return this;\n }\n}\n", "import type { TimerHandle } from './timerHandle';\ntype SetIntervalFunction = (handler: () => void, timeout?: number, ...args: any[]) => TimerHandle;\ntype ClearIntervalFunction = (handle: TimerHandle) => void;\n\ninterface IntervalProvider {\n setInterval: SetIntervalFunction;\n clearInterval: ClearIntervalFunction;\n delegate:\n | {\n setInterval: SetIntervalFunction;\n clearInterval: ClearIntervalFunction;\n }\n | undefined;\n}\n\nexport const intervalProvider: IntervalProvider = {\n // When accessing the delegate, use the variable rather than `this` so that\n // the functions can be called without being bound to the provider.\n setInterval(handler: () => void, timeout?: number, ...args) {\n const { delegate } = intervalProvider;\n if (delegate?.setInterval) {\n return delegate.setInterval(handler, timeout, ...args);\n }\n return setInterval(handler, timeout, ...args);\n },\n clearInterval(handle) {\n const { delegate } = intervalProvider;\n return (delegate?.clearInterval || clearInterval)(handle as any);\n },\n delegate: undefined,\n};\n", "import { Action } from './Action';\nimport { SchedulerAction } from '../types';\nimport { Subscription } from '../Subscription';\nimport { AsyncScheduler } from './AsyncScheduler';\nimport { intervalProvider } from './intervalProvider';\nimport { arrRemove } from '../util/arrRemove';\nimport { TimerHandle } from './timerHandle';\n\nexport class AsyncAction extends Action {\n public id: TimerHandle | undefined;\n public state?: T;\n // @ts-ignore: Property has no initializer and is not definitely assigned\n public delay: number;\n protected pending: boolean = false;\n\n constructor(protected scheduler: AsyncScheduler, protected work: (this: SchedulerAction, state?: T) => void) {\n super(scheduler, work);\n }\n\n public schedule(state?: T, delay: number = 0): Subscription {\n if (this.closed) {\n return this;\n }\n\n // Always replace the current state with the new state.\n this.state = state;\n\n const id = this.id;\n const scheduler = this.scheduler;\n\n //\n // Important implementation note:\n //\n // Actions only execute once by default, unless rescheduled from within the\n // scheduled callback. 
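As an aside on the `intervalProvider` defined above: its `delegate` slot is what lets test code swap the real timers out. A rough sketch, where the fake handlers are hypothetical and the relative import path is an assumption:

```ts
import { intervalProvider } from '../scheduler/intervalProvider';

// Collect scheduled handlers instead of starting real intervals.
const pending: Array<() => void> = [];

intervalProvider.delegate = {
  setInterval: (handler: () => void) => {
    pending.push(handler);
    return pending.length; // fake TimerHandle
  },
  clearInterval: () => {
    // Nothing to clear in this fake.
  },
};

// ...exercise code that schedules work, then flush the captured
// handlers by hand and restore the real timers.
pending.forEach((run) => run());
intervalProvider.delegate = undefined;
```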
This allows us to implement single and repeat\n // actions via the same code path, without adding API surface area, as well\n // as mimic traditional recursion but across asynchronous boundaries.\n //\n // However, JS runtimes and timers distinguish between intervals achieved by\n // serial `setTimeout` calls vs. a single `setInterval` call. An interval of\n // serial `setTimeout` calls can be individually delayed, which delays\n // scheduling the next `setTimeout`, and so on. `setInterval` attempts to\n // guarantee the interval callback will be invoked more precisely to the\n // interval period, regardless of load.\n //\n // Therefore, we use `setInterval` to schedule single and repeat actions.\n // If the action reschedules itself with the same delay, the interval is not\n // canceled. If the action doesn't reschedule, or reschedules with a\n // different delay, the interval will be canceled after scheduled callback\n // execution.\n //\n if (id != null) {\n this.id = this.recycleAsyncId(scheduler, id, delay);\n }\n\n // Set the pending flag indicating that this action has been scheduled, or\n // has recursively rescheduled itself.\n this.pending = true;\n\n this.delay = delay;\n // If this action has already an async Id, don't request a new one.\n this.id = this.id ?? this.requestAsyncId(scheduler, this.id, delay);\n\n return this;\n }\n\n protected requestAsyncId(scheduler: AsyncScheduler, _id?: TimerHandle, delay: number = 0): TimerHandle {\n return intervalProvider.setInterval(scheduler.flush.bind(scheduler, this), delay);\n }\n\n protected recycleAsyncId(_scheduler: AsyncScheduler, id?: TimerHandle, delay: number | null = 0): TimerHandle | undefined {\n // If this action is rescheduled with the same delay time, don't clear the interval id.\n if (delay != null && this.delay === delay && this.pending === false) {\n return id;\n }\n // Otherwise, if the action's delay time is different from the current delay,\n // or the action has been rescheduled before it's executed, clear the interval id\n if (id != null) {\n intervalProvider.clearInterval(id);\n }\n\n return undefined;\n }\n\n /**\n * Immediately executes this action and the `work` it contains.\n * @return {any}\n */\n public execute(state: T, delay: number): any {\n if (this.closed) {\n return new Error('executing a cancelled action');\n }\n\n this.pending = false;\n const error = this._execute(state, delay);\n if (error) {\n return error;\n } else if (this.pending === false && this.id != null) {\n // Dequeue if the action didn't reschedule itself. Don't call\n // unsubscribe(), because the action could reschedule later.\n // For example:\n // ```\n // scheduler.schedule(function doWork(counter) {\n // /* ... I'm a busy worker bee ... */\n // var originalAction = this;\n // /* wait 100ms before rescheduling the action */\n // setTimeout(function () {\n // originalAction.schedule(counter + 1);\n // }, 100);\n // }, 1000);\n // ```\n this.id = this.recycleAsyncId(this.scheduler, this.id, null);\n }\n }\n\n protected _execute(state: T, _delay: number): any {\n let errored: boolean = false;\n let errorValue: any;\n try {\n this.work(state);\n } catch (e) {\n errored = true;\n // HACK: Since code elsewhere is relying on the \"truthiness\" of the\n // return here, we can't have it return \"\" or 0 or false.\n // TODO: Clean this up when we refactor schedulers mid-version-8 or so.\n errorValue = e ? 
e : new Error('Scheduled action threw falsy error');\n }\n if (errored) {\n this.unsubscribe();\n return errorValue;\n }\n }\n\n unsubscribe() {\n if (!this.closed) {\n const { id, scheduler } = this;\n const { actions } = scheduler;\n\n this.work = this.state = this.scheduler = null!;\n this.pending = false;\n\n arrRemove(actions, this);\n if (id != null) {\n this.id = this.recycleAsyncId(scheduler, id, null);\n }\n\n this.delay = null!;\n super.unsubscribe();\n }\n }\n}\n", "import { Action } from './scheduler/Action';\nimport { Subscription } from './Subscription';\nimport { SchedulerLike, SchedulerAction } from './types';\nimport { dateTimestampProvider } from './scheduler/dateTimestampProvider';\n\n/**\n * An execution context and a data structure to order tasks and schedule their\n * execution. Provides a notion of (potentially virtual) time, through the\n * `now()` getter method.\n *\n * Each unit of work in a Scheduler is called an `Action`.\n *\n * ```ts\n * class Scheduler {\n * now(): number;\n * schedule(work, delay?, state?): Subscription;\n * }\n * ```\n *\n * @class Scheduler\n * @deprecated Scheduler is an internal implementation detail of RxJS, and\n * should not be used directly. Rather, create your own class and implement\n * {@link SchedulerLike}. Will be made internal in v8.\n */\nexport class Scheduler implements SchedulerLike {\n public static now: () => number = dateTimestampProvider.now;\n\n constructor(private schedulerActionCtor: typeof Action, now: () => number = Scheduler.now) {\n this.now = now;\n }\n\n /**\n * A getter method that returns a number representing the current time\n * (at the time this function was called) according to the scheduler's own\n * internal clock.\n * @return {number} A number that represents the current time. May or may not\n * have a relation to wall-clock time. May or may not refer to a time unit\n * (e.g. milliseconds).\n */\n public now: () => number;\n\n /**\n * Schedules a function, `work`, for execution. May happen at some point in\n * the future, according to the `delay` parameter, if specified. 
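Because `schedule` returns a `Subscription`, pending work can be cancelled before it runs; a small sketch using `asyncScheduler`:

```ts
import { asyncScheduler } from 'rxjs';

// Schedule work to run in 1 second, passing some state along.
const subscription = asyncScheduler.schedule(
  (state) => console.log('ran with', state),
  1000,
  { id: 42 }
);

// Unsubscribing before the delay elapses cancels the action,
// so nothing is ever logged.
subscription.unsubscribe();
```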
May be passed\n * some context object, `state`, which will be passed to the `work` function.\n *\n * The given arguments will be processed an stored as an Action object in a\n * queue of actions.\n *\n * @param {function(state: ?T): ?Subscription} work A function representing a\n * task, or some unit of work to be executed by the Scheduler.\n * @param {number} [delay] Time to wait before executing the work, where the\n * time unit is implicit and defined by the Scheduler itself.\n * @param {T} [state] Some contextual data that the `work` function uses when\n * called by the Scheduler.\n * @return {Subscription} A subscription in order to be able to unsubscribe\n * the scheduled work.\n */\n public schedule(work: (this: SchedulerAction, state?: T) => void, delay: number = 0, state?: T): Subscription {\n return new this.schedulerActionCtor(this, work).schedule(state, delay);\n }\n}\n", "import { Scheduler } from '../Scheduler';\nimport { Action } from './Action';\nimport { AsyncAction } from './AsyncAction';\nimport { TimerHandle } from './timerHandle';\n\nexport class AsyncScheduler extends Scheduler {\n public actions: Array> = [];\n /**\n * A flag to indicate whether the Scheduler is currently executing a batch of\n * queued actions.\n * @type {boolean}\n * @internal\n */\n public _active: boolean = false;\n /**\n * An internal ID used to track the latest asynchronous task such as those\n * coming from `setTimeout`, `setInterval`, `requestAnimationFrame`, and\n * others.\n * @type {any}\n * @internal\n */\n public _scheduled: TimerHandle | undefined;\n\n constructor(SchedulerAction: typeof Action, now: () => number = Scheduler.now) {\n super(SchedulerAction, now);\n }\n\n public flush(action: AsyncAction): void {\n const { actions } = this;\n\n if (this._active) {\n actions.push(action);\n return;\n }\n\n let error: any;\n this._active = true;\n\n do {\n if ((error = action.execute(action.state, action.delay))) {\n break;\n }\n } while ((action = actions.shift()!)); // exhaust the scheduler queue\n\n this._active = false;\n\n if (error) {\n while ((action = actions.shift()!)) {\n action.unsubscribe();\n }\n throw error;\n }\n }\n}\n", "import { AsyncAction } from './AsyncAction';\nimport { AsyncScheduler } from './AsyncScheduler';\n\n/**\n *\n * Async Scheduler\n *\n * Schedule task as if you used setTimeout(task, duration)\n *\n * `async` scheduler schedules tasks asynchronously, by putting them on the JavaScript\n * event loop queue. 
It is best used to delay tasks in time or to schedule tasks repeating\n * in intervals.\n *\n * If you just want to \"defer\" task, that is to perform it right after currently\n * executing synchronous code ends (commonly achieved by `setTimeout(deferredTask, 0)`),\n * better choice will be the {@link asapScheduler} scheduler.\n *\n * ## Examples\n * Use async scheduler to delay task\n * ```ts\n * import { asyncScheduler } from 'rxjs';\n *\n * const task = () => console.log('it works!');\n *\n * asyncScheduler.schedule(task, 2000);\n *\n * // After 2 seconds logs:\n * // \"it works!\"\n * ```\n *\n * Use async scheduler to repeat task in intervals\n * ```ts\n * import { asyncScheduler } from 'rxjs';\n *\n * function task(state) {\n * console.log(state);\n * this.schedule(state + 1, 1000); // `this` references currently executing Action,\n * // which we reschedule with new state and delay\n * }\n *\n * asyncScheduler.schedule(task, 3000, 0);\n *\n * // Logs:\n * // 0 after 3s\n * // 1 after 4s\n * // 2 after 5s\n * // 3 after 6s\n * ```\n */\n\nexport const asyncScheduler = new AsyncScheduler(AsyncAction);\n\n/**\n * @deprecated Renamed to {@link asyncScheduler}. Will be removed in v8.\n */\nexport const async = asyncScheduler;\n", "import { AsyncAction } from './AsyncAction';\nimport { Subscription } from '../Subscription';\nimport { QueueScheduler } from './QueueScheduler';\nimport { SchedulerAction } from '../types';\nimport { TimerHandle } from './timerHandle';\n\nexport class QueueAction extends AsyncAction {\n constructor(protected scheduler: QueueScheduler, protected work: (this: SchedulerAction, state?: T) => void) {\n super(scheduler, work);\n }\n\n public schedule(state?: T, delay: number = 0): Subscription {\n if (delay > 0) {\n return super.schedule(state, delay);\n }\n this.delay = delay;\n this.state = state;\n this.scheduler.flush(this);\n return this;\n }\n\n public execute(state: T, delay: number): any {\n return delay > 0 || this.closed ? super.execute(state, delay) : this._execute(state, delay);\n }\n\n protected requestAsyncId(scheduler: QueueScheduler, id?: TimerHandle, delay: number = 0): TimerHandle {\n // If delay exists and is greater than 0, or if the delay is null (the\n // action wasn't rescheduled) but was originally scheduled as an async\n // action, then recycle as an async action.\n\n if ((delay != null && delay > 0) || (delay == null && this.delay > 0)) {\n return super.requestAsyncId(scheduler, id, delay);\n }\n\n // Otherwise flush the scheduler starting with this action.\n scheduler.flush(this);\n\n // HACK: In the past, this was returning `void`. However, `void` isn't a valid\n // `TimerHandle`, and generally the return value here isn't really used. So the\n // compromise is to return `0` which is both \"falsy\" and a valid `TimerHandle`,\n // as opposed to refactoring every other instanceo of `requestAsyncId`.\n return 0;\n }\n}\n", "import { AsyncScheduler } from './AsyncScheduler';\n\nexport class QueueScheduler extends AsyncScheduler {\n}\n", "import { QueueAction } from './QueueAction';\nimport { QueueScheduler } from './QueueScheduler';\n\n/**\n *\n * Queue Scheduler\n *\n * Put every next task on a queue, instead of executing it immediately\n *\n * `queue` scheduler, when used with delay, behaves the same as {@link asyncScheduler} scheduler.\n *\n * When used without delay, it schedules given task synchronously - executes it right when\n * it is scheduled. 
However when called recursively, that is when inside the scheduled task,\n * another task is scheduled with queue scheduler, instead of executing immediately as well,\n * that task will be put on a queue and wait for current one to finish.\n *\n * This means that when you execute task with `queue` scheduler, you are sure it will end\n * before any other task scheduled with that scheduler will start.\n *\n * ## Examples\n * Schedule recursively first, then do something\n * ```ts\n * import { queueScheduler } from 'rxjs';\n *\n * queueScheduler.schedule(() => {\n * queueScheduler.schedule(() => console.log('second')); // will not happen now, but will be put on a queue\n *\n * console.log('first');\n * });\n *\n * // Logs:\n * // \"first\"\n * // \"second\"\n * ```\n *\n * Reschedule itself recursively\n * ```ts\n * import { queueScheduler } from 'rxjs';\n *\n * queueScheduler.schedule(function(state) {\n * if (state !== 0) {\n * console.log('before', state);\n * this.schedule(state - 1); // `this` references currently executing Action,\n * // which we reschedule with new state\n * console.log('after', state);\n * }\n * }, 0, 3);\n *\n * // In scheduler that runs recursively, you would expect:\n * // \"before\", 3\n * // \"before\", 2\n * // \"before\", 1\n * // \"after\", 1\n * // \"after\", 2\n * // \"after\", 3\n *\n * // But with queue it logs:\n * // \"before\", 3\n * // \"after\", 3\n * // \"before\", 2\n * // \"after\", 2\n * // \"before\", 1\n * // \"after\", 1\n * ```\n */\n\nexport const queueScheduler = new QueueScheduler(QueueAction);\n\n/**\n * @deprecated Renamed to {@link queueScheduler}. Will be removed in v8.\n */\nexport const queue = queueScheduler;\n", "import { AsyncAction } from './AsyncAction';\nimport { AnimationFrameScheduler } from './AnimationFrameScheduler';\nimport { SchedulerAction } from '../types';\nimport { animationFrameProvider } from './animationFrameProvider';\nimport { TimerHandle } from './timerHandle';\n\nexport class AnimationFrameAction extends AsyncAction {\n constructor(protected scheduler: AnimationFrameScheduler, protected work: (this: SchedulerAction, state?: T) => void) {\n super(scheduler, work);\n }\n\n protected requestAsyncId(scheduler: AnimationFrameScheduler, id?: TimerHandle, delay: number = 0): TimerHandle {\n // If delay is greater than 0, request as an async action.\n if (delay !== null && delay > 0) {\n return super.requestAsyncId(scheduler, id, delay);\n }\n // Push the action to the end of the scheduler queue.\n scheduler.actions.push(this);\n // If an animation frame has already been requested, don't request another\n // one. If an animation frame hasn't been requested yet, request one. Return\n // the current animation frame request id.\n return scheduler._scheduled || (scheduler._scheduled = animationFrameProvider.requestAnimationFrame(() => scheduler.flush(undefined)));\n }\n\n protected recycleAsyncId(scheduler: AnimationFrameScheduler, id?: TimerHandle, delay: number = 0): TimerHandle | undefined {\n // If delay exists and is greater than 0, or if the delay is null (the\n // action wasn't rescheduled) but was originally scheduled as an async\n // action, then recycle as an async action.\n if (delay != null ? 
delay > 0 : this.delay > 0) {\n return super.recycleAsyncId(scheduler, id, delay);\n }\n // If the scheduler queue has no remaining actions with the same async id,\n // cancel the requested animation frame and set the scheduled flag to\n // undefined so the next AnimationFrameAction will request its own.\n const { actions } = scheduler;\n if (id != null && actions[actions.length - 1]?.id !== id) {\n animationFrameProvider.cancelAnimationFrame(id as number);\n scheduler._scheduled = undefined;\n }\n // Return undefined so the action knows to request a new async id if it's rescheduled.\n return undefined;\n }\n}\n", "import { AsyncAction } from './AsyncAction';\nimport { AsyncScheduler } from './AsyncScheduler';\n\nexport class AnimationFrameScheduler extends AsyncScheduler {\n public flush(action?: AsyncAction): void {\n this._active = true;\n // The async id that effects a call to flush is stored in _scheduled.\n // Before executing an action, it's necessary to check the action's async\n // id to determine whether it's supposed to be executed in the current\n // flush.\n // Previous implementations of this method used a count to determine this,\n // but that was unsound, as actions that are unsubscribed - i.e. cancelled -\n // are removed from the actions array and that can shift actions that are\n // scheduled to be executed in a subsequent flush into positions at which\n // they are executed within the current flush.\n const flushId = this._scheduled;\n this._scheduled = undefined;\n\n const { actions } = this;\n let error: any;\n action = action || actions.shift()!;\n\n do {\n if ((error = action.execute(action.state, action.delay))) {\n break;\n }\n } while ((action = actions[0]) && action.id === flushId && actions.shift());\n\n this._active = false;\n\n if (error) {\n while ((action = actions[0]) && action.id === flushId && actions.shift()) {\n action.unsubscribe();\n }\n throw error;\n }\n }\n}\n", "import { AnimationFrameAction } from './AnimationFrameAction';\nimport { AnimationFrameScheduler } from './AnimationFrameScheduler';\n\n/**\n *\n * Animation Frame Scheduler\n *\n * Perform task when `window.requestAnimationFrame` would fire\n *\n * When `animationFrame` scheduler is used with delay, it will fall back to {@link asyncScheduler} scheduler\n * behaviour.\n *\n * Without delay, `animationFrame` scheduler can be used to create smooth browser animations.\n * It makes sure scheduled task will happen just before next browser content repaint,\n * thus performing animations as efficiently as possible.\n *\n * ## Example\n * Schedule div height animation\n * ```ts\n * // html:
<div style=\"background: #0ff;\"></div>
\n * import { animationFrameScheduler } from 'rxjs';\n *\n * const div = document.querySelector('div');\n *\n * animationFrameScheduler.schedule(function(height) {\n * div.style.height = height + \"px\";\n *\n * this.schedule(height + 1); // `this` references currently executing Action,\n * // which we reschedule with new state\n * }, 0, 0);\n *\n * // You will see a div element growing in height\n * ```\n */\n\nexport const animationFrameScheduler = new AnimationFrameScheduler(AnimationFrameAction);\n\n/**\n * @deprecated Renamed to {@link animationFrameScheduler}. Will be removed in v8.\n */\nexport const animationFrame = animationFrameScheduler;\n", "import { Observable } from '../Observable';\nimport { SchedulerLike } from '../types';\n\n/**\n * A simple Observable that emits no items to the Observer and immediately\n * emits a complete notification.\n *\n * Just emits 'complete', and nothing else.\n *\n * ![](empty.png)\n *\n * A simple Observable that only emits the complete notification. It can be used\n * for composing with other Observables, such as in a {@link mergeMap}.\n *\n * ## Examples\n *\n * Log complete notification\n *\n * ```ts\n * import { EMPTY } from 'rxjs';\n *\n * EMPTY.subscribe({\n * next: () => console.log('Next'),\n * complete: () => console.log('Complete!')\n * });\n *\n * // Outputs\n * // Complete!\n * ```\n *\n * Emit the number 7, then complete\n *\n * ```ts\n * import { EMPTY, startWith } from 'rxjs';\n *\n * const result = EMPTY.pipe(startWith(7));\n * result.subscribe(x => console.log(x));\n *\n * // Outputs\n * // 7\n * ```\n *\n * Map and flatten only odd numbers to the sequence `'a'`, `'b'`, `'c'`\n *\n * ```ts\n * import { interval, mergeMap, of, EMPTY } from 'rxjs';\n *\n * const interval$ = interval(1000);\n * const result = interval$.pipe(\n * mergeMap(x => x % 2 === 1 ? of('a', 'b', 'c') : EMPTY),\n * );\n * result.subscribe(x => console.log(x));\n *\n * // Results in the following to the console:\n * // x is equal to the count on the interval, e.g. (0, 1, 2, 3, ...)\n * // x will occur every 1000ms\n * // if x % 2 is equal to 1, print a, b, c (each on its own)\n * // if x % 2 is not equal to 1, nothing will be output\n * ```\n *\n * @see {@link Observable}\n * @see {@link NEVER}\n * @see {@link of}\n * @see {@link throwError}\n */\nexport const EMPTY = new Observable((subscriber) => subscriber.complete());\n\n/**\n * @param scheduler A {@link SchedulerLike} to use for scheduling\n * the emission of the complete notification.\n * @deprecated Replaced with the {@link EMPTY} constant or {@link scheduled} (e.g. `scheduled([], scheduler)`). Will be removed in v8.\n */\nexport function empty(scheduler?: SchedulerLike) {\n return scheduler ? emptyScheduled(scheduler) : EMPTY;\n}\n\nfunction emptyScheduled(scheduler: SchedulerLike) {\n return new Observable((subscriber) => scheduler.schedule(() => subscriber.complete()));\n}\n", "import { SchedulerLike } from '../types';\nimport { isFunction } from './isFunction';\n\nexport function isScheduler(value: any): value is SchedulerLike {\n return value && isFunction(value.schedule);\n}\n", "import { SchedulerLike } from '../types';\nimport { isFunction } from './isFunction';\nimport { isScheduler } from './isScheduler';\n\nfunction last(arr: T[]): T | undefined {\n return arr[arr.length - 1];\n}\n\nexport function popResultSelector(args: any[]): ((...args: unknown[]) => unknown) | undefined {\n return isFunction(last(args)) ? 
args.pop() : undefined;\n}\n\nexport function popScheduler(args: any[]): SchedulerLike | undefined {\n return isScheduler(last(args)) ? args.pop() : undefined;\n}\n\nexport function popNumber(args: any[], defaultValue: number): number {\n return typeof last(args) === 'number' ? args.pop()! : defaultValue;\n}\n", "export const isArrayLike = ((x: any): x is ArrayLike => x && typeof x.length === 'number' && typeof x !== 'function');", "import { isFunction } from \"./isFunction\";\n\n/**\n * Tests to see if the object is \"thennable\".\n * @param value the object to test\n */\nexport function isPromise(value: any): value is PromiseLike {\n return isFunction(value?.then);\n}\n", "import { InteropObservable } from '../types';\nimport { observable as Symbol_observable } from '../symbol/observable';\nimport { isFunction } from './isFunction';\n\n/** Identifies an input as being Observable (but not necessary an Rx Observable) */\nexport function isInteropObservable(input: any): input is InteropObservable {\n return isFunction(input[Symbol_observable]);\n}\n", "import { isFunction } from './isFunction';\n\nexport function isAsyncIterable(obj: any): obj is AsyncIterable {\n return Symbol.asyncIterator && isFunction(obj?.[Symbol.asyncIterator]);\n}\n", "/**\n * Creates the TypeError to throw if an invalid object is passed to `from` or `scheduled`.\n * @param input The object that was passed.\n */\nexport function createInvalidObservableTypeError(input: any) {\n // TODO: We should create error codes that can be looked up, so this can be less verbose.\n return new TypeError(\n `You provided ${\n input !== null && typeof input === 'object' ? 'an invalid object' : `'${input}'`\n } where a stream was expected. You can provide an Observable, Promise, ReadableStream, Array, AsyncIterable, or Iterable.`\n );\n}\n", "export function getSymbolIterator(): symbol {\n if (typeof Symbol !== 'function' || !Symbol.iterator) {\n return '@@iterator' as any;\n }\n\n return Symbol.iterator;\n}\n\nexport const iterator = getSymbolIterator();\n", "import { iterator as Symbol_iterator } from '../symbol/iterator';\nimport { isFunction } from './isFunction';\n\n/** Identifies an input as being an Iterable */\nexport function isIterable(input: any): input is Iterable {\n return isFunction(input?.[Symbol_iterator]);\n}\n", "import { ReadableStreamLike } from '../types';\nimport { isFunction } from './isFunction';\n\nexport async function* readableStreamLikeToAsyncGenerator(readableStream: ReadableStreamLike): AsyncGenerator {\n const reader = readableStream.getReader();\n try {\n while (true) {\n const { value, done } = await reader.read();\n if (done) {\n return;\n }\n yield value!;\n }\n } finally {\n reader.releaseLock();\n }\n}\n\nexport function isReadableStreamLike(obj: any): obj is ReadableStreamLike {\n // We don't want to use instanceof checks because they would return\n // false for instances from another Realm, like an