Lan92 commited on
Commit
8ea71e1
·
verified ·
1 Parent(s): 50ea7cd

Upload folder using huggingface_hub

Browse files
.gitattributes CHANGED
@@ -33,3 +33,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ Samples/v_CricketShot_g07_c03.mp4 filter=lfs diff=lfs merge=lfs -text
37
+ Samples/v_PlayingCello_g04_c01.mp4 filter=lfs diff=lfs merge=lfs -text
README (1).md ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ title: Video Classification with CNN-RNN
3
+ emoji: 🎬
4
+ colorFrom: red
5
+ colorTo: yellow
6
+ sdk: gradio
7
+ app_file: app.py
8
+ pinned: false
9
+ ---
10
+
11
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference
README.md CHANGED
@@ -1,12 +1,6 @@
1
  ---
2
  title: Video
3
- emoji: ⚡
4
- colorFrom: green
5
- colorTo: blue
6
- sdk: gradio
7
- sdk_version: 5.25.0
8
  app_file: app.py
9
- pinned: false
 
10
  ---
11
-
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
1
  ---
2
  title: Video
 
 
 
 
 
3
  app_file: app.py
4
+ sdk: gradio
5
+ sdk_version: 5.23.3
6
  ---
 
 
Samples-20250408T035849Z-001.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:882856ee78f26bc7e97293b2bb6fffe9088cbc2a3c5abf5b04725941f9a37477
3
+ size 3572793
Samples/v_CricketShot_g07_c03.mp4 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:27bb7648cd61f972a453bf126c48d1796e8af69bb446ceb85f0bfe8d0835a91b
3
+ size 1492498
Samples/v_PlayingCello_g04_c01.mp4 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7d46406b76af33b93b3f1482ebf3821ed1051b9cabcb5abda3dbbadcc03c9f43
3
+ size 2079055
app.ipynb ADDED
@@ -0,0 +1,758 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "code",
5
+ "execution_count": null,
6
+ "metadata": {
7
+ "colab": {
8
+ "base_uri": "https://localhost:8080/"
9
+ },
10
+ "executionInfo": {
11
+ "elapsed": 19730,
12
+ "status": "ok",
13
+ "timestamp": 1744098140927,
14
+ "user": {
15
+ "displayName": "Lan Hoang",
16
+ "userId": "15367629887304430933"
17
+ },
18
+ "user_tz": -420
19
+ },
20
+ "id": "Vnowuw1FWR6Y",
21
+ "outputId": "18eb608c-528b-4a68-84c1-f83e2d1b5091"
22
+ },
23
+ "outputs": [
24
+ {
25
+ "name": "stdout",
26
+ "output_type": "stream",
27
+ "text": [
28
+ "Requirement already satisfied: gradio in c:\\users\\pc\\appdata\\local\\programs\\python\\python310\\lib\\site-packages (5.23.3)\n",
29
+ "Requirement already satisfied: aiofiles<24.0,>=22.0 in c:\\users\\pc\\appdata\\local\\programs\\python\\python310\\lib\\site-packages (from gradio) (23.2.1)\n",
30
+ "Requirement already satisfied: anyio<5.0,>=3.0 in c:\\users\\pc\\appdata\\local\\programs\\python\\python310\\lib\\site-packages (from gradio) (4.4.0)\n",
31
+ "Requirement already satisfied: fastapi<1.0,>=0.115.2 in c:\\users\\pc\\appdata\\local\\programs\\python\\python310\\lib\\site-packages (from gradio) (0.115.12)\n",
32
+ "Requirement already satisfied: ffmpy in c:\\users\\pc\\appdata\\local\\programs\\python\\python310\\lib\\site-packages (from gradio) (0.5.0)\n",
33
+ "Requirement already satisfied: gradio-client==1.8.0 in c:\\users\\pc\\appdata\\local\\programs\\python\\python310\\lib\\site-packages (from gradio) (1.8.0)\n",
34
+ "Requirement already satisfied: groovy~=0.1 in c:\\users\\pc\\appdata\\local\\programs\\python\\python310\\lib\\site-packages (from gradio) (0.1.2)\n",
35
+ "Requirement already satisfied: httpx>=0.24.1 in c:\\users\\pc\\appdata\\local\\programs\\python\\python310\\lib\\site-packages (from gradio) (0.27.0)\n",
36
+ "Requirement already satisfied: huggingface-hub>=0.28.1 in c:\\users\\pc\\appdata\\local\\programs\\python\\python310\\lib\\site-packages (from gradio) (0.30.2)\n",
37
+ "Requirement already satisfied: jinja2<4.0 in c:\\users\\pc\\appdata\\local\\programs\\python\\python310\\lib\\site-packages (from gradio) (3.1.4)\n",
38
+ "Requirement already satisfied: markupsafe<4.0,>=2.0 in c:\\users\\pc\\appdata\\local\\programs\\python\\python310\\lib\\site-packages (from gradio) (2.1.5)\n",
39
+ "Requirement already satisfied: numpy<3.0,>=1.0 in c:\\users\\pc\\appdata\\local\\programs\\python\\python310\\lib\\site-packages (from gradio) (1.26.4)\n",
40
+ "Requirement already satisfied: orjson~=3.0 in c:\\users\\pc\\appdata\\local\\programs\\python\\python310\\lib\\site-packages (from gradio) (3.10.16)\n",
41
+ "Requirement already satisfied: packaging in c:\\users\\pc\\appdata\\local\\programs\\python\\python310\\lib\\site-packages (from gradio) (24.1)\n",
42
+ "Requirement already satisfied: pandas<3.0,>=1.0 in c:\\users\\pc\\appdata\\local\\programs\\python\\python310\\lib\\site-packages (from gradio) (2.2.2)\n",
43
+ "Requirement already satisfied: pillow<12.0,>=8.0 in c:\\users\\pc\\appdata\\local\\programs\\python\\python310\\lib\\site-packages (from gradio) (10.4.0)\n",
44
+ "Requirement already satisfied: pydantic<2.12,>=2.0 in c:\\users\\pc\\appdata\\local\\programs\\python\\python310\\lib\\site-packages (from gradio) (2.11.2)\n",
45
+ "Requirement already satisfied: pydub in c:\\users\\pc\\appdata\\local\\programs\\python\\python310\\lib\\site-packages (from gradio) (0.25.1)\n",
46
+ "Requirement already satisfied: python-multipart>=0.0.18 in c:\\users\\pc\\appdata\\local\\programs\\python\\python310\\lib\\site-packages (from gradio) (0.0.20)\n",
47
+ "Requirement already satisfied: pyyaml<7.0,>=5.0 in c:\\users\\pc\\appdata\\local\\programs\\python\\python310\\lib\\site-packages (from gradio) (6.0.2)\n",
48
+ "Requirement already satisfied: ruff>=0.9.3 in c:\\users\\pc\\appdata\\local\\programs\\python\\python310\\lib\\site-packages (from gradio) (0.11.4)\n",
49
+ "Requirement already satisfied: safehttpx<0.2.0,>=0.1.6 in c:\\users\\pc\\appdata\\local\\programs\\python\\python310\\lib\\site-packages (from gradio) (0.1.6)\n",
50
+ "Requirement already satisfied: semantic-version~=2.0 in c:\\users\\pc\\appdata\\local\\programs\\python\\python310\\lib\\site-packages (from gradio) (2.10.0)\n",
51
+ "Requirement already satisfied: starlette<1.0,>=0.40.0 in c:\\users\\pc\\appdata\\local\\programs\\python\\python310\\lib\\site-packages (from gradio) (0.46.1)\n",
52
+ "Requirement already satisfied: tomlkit<0.14.0,>=0.12.0 in c:\\users\\pc\\appdata\\local\\programs\\python\\python310\\lib\\site-packages (from gradio) (0.13.2)\n",
53
+ "Requirement already satisfied: typer<1.0,>=0.12 in c:\\users\\pc\\appdata\\local\\programs\\python\\python310\\lib\\site-packages (from gradio) (0.15.2)\n",
54
+ "Requirement already satisfied: typing-extensions~=4.0 in c:\\users\\pc\\appdata\\local\\programs\\python\\python310\\lib\\site-packages (from gradio) (4.12.2)\n",
55
+ "Requirement already satisfied: uvicorn>=0.14.0 in c:\\users\\pc\\appdata\\local\\programs\\python\\python310\\lib\\site-packages (from gradio) (0.34.0)\n",
56
+ "Requirement already satisfied: fsspec in c:\\users\\pc\\appdata\\local\\programs\\python\\python310\\lib\\site-packages (from gradio-client==1.8.0->gradio) (2025.2.0)\n",
57
+ "Requirement already satisfied: websockets<16.0,>=10.0 in c:\\users\\pc\\appdata\\local\\programs\\python\\python310\\lib\\site-packages (from gradio-client==1.8.0->gradio) (15.0.1)\n",
58
+ "Requirement already satisfied: idna>=2.8 in c:\\users\\pc\\appdata\\local\\programs\\python\\python310\\lib\\site-packages (from anyio<5.0,>=3.0->gradio) (3.7)\n",
59
+ "Requirement already satisfied: sniffio>=1.1 in c:\\users\\pc\\appdata\\local\\programs\\python\\python310\\lib\\site-packages (from anyio<5.0,>=3.0->gradio) (1.3.1)\n",
60
+ "Requirement already satisfied: exceptiongroup>=1.0.2 in c:\\users\\pc\\appdata\\local\\programs\\python\\python310\\lib\\site-packages (from anyio<5.0,>=3.0->gradio) (1.2.2)\n",
61
+ "Requirement already satisfied: certifi in c:\\users\\pc\\appdata\\local\\programs\\python\\python310\\lib\\site-packages (from httpx>=0.24.1->gradio) (2024.7.4)\n",
62
+ "Requirement already satisfied: httpcore==1.* in c:\\users\\pc\\appdata\\local\\programs\\python\\python310\\lib\\site-packages (from httpx>=0.24.1->gradio) (1.0.5)\n",
63
+ "Requirement already satisfied: h11<0.15,>=0.13 in c:\\users\\pc\\appdata\\local\\programs\\python\\python310\\lib\\site-packages (from httpcore==1.*->httpx>=0.24.1->gradio) (0.14.0)\n",
64
+ "Requirement already satisfied: filelock in c:\\users\\pc\\appdata\\local\\programs\\python\\python310\\lib\\site-packages (from huggingface-hub>=0.28.1->gradio) (3.17.0)\n",
65
+ "Requirement already satisfied: requests in c:\\users\\pc\\appdata\\local\\programs\\python\\python310\\lib\\site-packages (from huggingface-hub>=0.28.1->gradio) (2.32.3)\n",
66
+ "Requirement already satisfied: tqdm>=4.42.1 in c:\\users\\pc\\appdata\\local\\programs\\python\\python310\\lib\\site-packages (from huggingface-hub>=0.28.1->gradio) (4.67.1)\n",
67
+ "Requirement already satisfied: python-dateutil>=2.8.2 in c:\\users\\pc\\appdata\\local\\programs\\python\\python310\\lib\\site-packages (from pandas<3.0,>=1.0->gradio) (2.9.0.post0)\n",
68
+ "Requirement already satisfied: pytz>=2020.1 in c:\\users\\pc\\appdata\\local\\programs\\python\\python310\\lib\\site-packages (from pandas<3.0,>=1.0->gradio) (2024.1)\n",
69
+ "Requirement already satisfied: tzdata>=2022.7 in c:\\users\\pc\\appdata\\local\\programs\\python\\python310\\lib\\site-packages (from pandas<3.0,>=1.0->gradio) (2024.1)\n",
70
+ "Requirement already satisfied: annotated-types>=0.6.0 in c:\\users\\pc\\appdata\\local\\programs\\python\\python310\\lib\\site-packages (from pydantic<2.12,>=2.0->gradio) (0.7.0)\n",
71
+ "Requirement already satisfied: pydantic-core==2.33.1 in c:\\users\\pc\\appdata\\local\\programs\\python\\python310\\lib\\site-packages (from pydantic<2.12,>=2.0->gradio) (2.33.1)\n",
72
+ "Requirement already satisfied: typing-inspection>=0.4.0 in c:\\users\\pc\\appdata\\local\\programs\\python\\python310\\lib\\site-packages (from pydantic<2.12,>=2.0->gradio) (0.4.0)\n",
73
+ "Requirement already satisfied: click>=8.0.0 in c:\\users\\pc\\appdata\\local\\programs\\python\\python310\\lib\\site-packages (from typer<1.0,>=0.12->gradio) (8.1.8)\n",
74
+ "Requirement already satisfied: shellingham>=1.3.0 in c:\\users\\pc\\appdata\\local\\programs\\python\\python310\\lib\\site-packages (from typer<1.0,>=0.12->gradio) (1.5.4)\n",
75
+ "Requirement already satisfied: rich>=10.11.0 in c:\\users\\pc\\appdata\\local\\programs\\python\\python310\\lib\\site-packages (from typer<1.0,>=0.12->gradio) (13.9.2)\n",
76
+ "Requirement already satisfied: colorama in c:\\users\\pc\\appdata\\local\\programs\\python\\python310\\lib\\site-packages (from click>=8.0.0->typer<1.0,>=0.12->gradio) (0.4.6)\n",
77
+ "Requirement already satisfied: six>=1.5 in c:\\users\\pc\\appdata\\local\\programs\\python\\python310\\lib\\site-packages (from python-dateutil>=2.8.2->pandas<3.0,>=1.0->gradio) (1.16.0)\n",
78
+ "Requirement already satisfied: markdown-it-py>=2.2.0 in c:\\users\\pc\\appdata\\local\\programs\\python\\python310\\lib\\site-packages (from rich>=10.11.0->typer<1.0,>=0.12->gradio) (3.0.0)\n",
79
+ "Requirement already satisfied: pygments<3.0.0,>=2.13.0 in c:\\users\\pc\\appdata\\local\\programs\\python\\python310\\lib\\site-packages (from rich>=10.11.0->typer<1.0,>=0.12->gradio) (2.18.0)\n",
80
+ "Requirement already satisfied: charset-normalizer<4,>=2 in c:\\users\\pc\\appdata\\local\\programs\\python\\python310\\lib\\site-packages (from requests->huggingface-hub>=0.28.1->gradio) (3.3.2)\n",
81
+ "Requirement already satisfied: urllib3<3,>=1.21.1 in c:\\users\\pc\\appdata\\local\\programs\\python\\python310\\lib\\site-packages (from requests->huggingface-hub>=0.28.1->gradio) (2.2.2)\n",
82
+ "Requirement already satisfied: mdurl~=0.1 in c:\\users\\pc\\appdata\\local\\programs\\python\\python310\\lib\\site-packages (from markdown-it-py>=2.2.0->rich>=10.11.0->typer<1.0,>=0.12->gradio) (0.1.2)\n",
83
+ "Collecting git+https://github.com/tensorflow/docs\n",
84
+ " Cloning https://github.com/tensorflow/docs to c:\\users\\pc\\appdata\\local\\temp\\pip-req-build-cktlp7ez\n",
85
+ " Resolved https://github.com/tensorflow/docs to commit 18c3a45517af27c8513e165b8a52e6bbc1204b99\n",
86
+ " Preparing metadata (setup.py): started\n",
87
+ " Preparing metadata (setup.py): finished with status 'done'\n",
88
+ "Requirement already satisfied: astor in c:\\users\\pc\\appdata\\local\\programs\\python\\python310\\lib\\site-packages (from tensorflow-docs==2025.3.6.10029) (0.8.1)\n",
89
+ "Requirement already satisfied: absl-py in c:\\users\\pc\\appdata\\local\\programs\\python\\python310\\lib\\site-packages (from tensorflow-docs==2025.3.6.10029) (2.1.0)\n",
90
+ "Requirement already satisfied: jinja2 in c:\\users\\pc\\appdata\\local\\programs\\python\\python310\\lib\\site-packages (from tensorflow-docs==2025.3.6.10029) (3.1.4)\n",
91
+ "Requirement already satisfied: nbformat in c:\\users\\pc\\appdata\\local\\programs\\python\\python310\\lib\\site-packages (from tensorflow-docs==2025.3.6.10029) (5.10.4)\n",
92
+ "Requirement already satisfied: protobuf>=3.12 in c:\\users\\pc\\appdata\\local\\programs\\python\\python310\\lib\\site-packages (from tensorflow-docs==2025.3.6.10029) (4.25.5)\n",
93
+ "Requirement already satisfied: pyyaml in c:\\users\\pc\\appdata\\local\\programs\\python\\python310\\lib\\site-packages (from tensorflow-docs==2025.3.6.10029) (6.0.2)\n",
94
+ "Requirement already satisfied: MarkupSafe>=2.0 in c:\\users\\pc\\appdata\\local\\programs\\python\\python310\\lib\\site-packages (from jinja2->tensorflow-docs==2025.3.6.10029) (2.1.5)\n",
95
+ "Requirement already satisfied: fastjsonschema>=2.15 in c:\\users\\pc\\appdata\\local\\programs\\python\\python310\\lib\\site-packages (from nbformat->tensorflow-docs==2025.3.6.10029) (2.20.0)\n",
96
+ "Requirement already satisfied: jsonschema>=2.6 in c:\\users\\pc\\appdata\\local\\programs\\python\\python310\\lib\\site-packages (from nbformat->tensorflow-docs==2025.3.6.10029) (4.23.0)\n",
97
+ "Requirement already satisfied: jupyter-core!=5.0.*,>=4.12 in c:\\users\\pc\\appdata\\local\\programs\\python\\python310\\lib\\site-packages (from nbformat->tensorflow-docs==2025.3.6.10029) (5.7.2)\n",
98
+ "Requirement already satisfied: traitlets>=5.1 in c:\\users\\pc\\appdata\\local\\programs\\python\\python310\\lib\\site-packages (from nbformat->tensorflow-docs==2025.3.6.10029) (5.14.3)\n",
99
+ "Requirement already satisfied: attrs>=22.2.0 in c:\\users\\pc\\appdata\\local\\programs\\python\\python310\\lib\\site-packages (from jsonschema>=2.6->nbformat->tensorflow-docs==2025.3.6.10029) (24.2.0)\n",
100
+ "Requirement already satisfied: jsonschema-specifications>=2023.03.6 in c:\\users\\pc\\appdata\\local\\programs\\python\\python310\\lib\\site-packages (from jsonschema>=2.6->nbformat->tensorflow-docs==2025.3.6.10029) (2023.12.1)\n",
101
+ "Requirement already satisfied: referencing>=0.28.4 in c:\\users\\pc\\appdata\\local\\programs\\python\\python310\\lib\\site-packages (from jsonschema>=2.6->nbformat->tensorflow-docs==2025.3.6.10029) (0.35.1)\n",
102
+ "Requirement already satisfied: rpds-py>=0.7.1 in c:\\users\\pc\\appdata\\local\\programs\\python\\python310\\lib\\site-packages (from jsonschema>=2.6->nbformat->tensorflow-docs==2025.3.6.10029) (0.20.0)\n",
103
+ "Requirement already satisfied: platformdirs>=2.5 in c:\\users\\pc\\appdata\\local\\programs\\python\\python310\\lib\\site-packages (from jupyter-core!=5.0.*,>=4.12->nbformat->tensorflow-docs==2025.3.6.10029) (4.2.2)\n",
104
+ "Requirement already satisfied: pywin32>=300 in c:\\users\\pc\\appdata\\local\\programs\\python\\python310\\lib\\site-packages (from jupyter-core!=5.0.*,>=4.12->nbformat->tensorflow-docs==2025.3.6.10029) (306)\n"
105
+ ]
106
+ },
107
+ {
108
+ "name": "stderr",
109
+ "output_type": "stream",
110
+ "text": [
111
+ " Running command git clone --filter=blob:none --quiet https://github.com/tensorflow/docs 'C:\\Users\\PC\\AppData\\Local\\Temp\\pip-req-build-cktlp7ez'\n"
112
+ ]
113
+ }
114
+ ],
115
+ "source": [
116
+ "%pip install gradio\n",
117
+ "%pip install git+https://github.com/tensorflow/docs"
118
+ ]
119
+ },
120
+ {
121
+ "cell_type": "code",
122
+ "execution_count": 2,
123
+ "metadata": {
124
+ "executionInfo": {
125
+ "elapsed": 18553,
126
+ "status": "ok",
127
+ "timestamp": 1744098159483,
128
+ "user": {
129
+ "displayName": "Lan Hoang",
130
+ "userId": "15367629887304430933"
131
+ },
132
+ "user_tz": -420
133
+ },
134
+ "id": "nBrcqD-WktG-"
135
+ },
136
+ "outputs": [
137
+ {
138
+ "name": "stderr",
139
+ "output_type": "stream",
140
+ "text": [
141
+ "c:\\Users\\PC\\AppData\\Local\\Programs\\Python\\Python310\\lib\\site-packages\\tqdm\\auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
142
+ " from .autonotebook import tqdm as notebook_tqdm\n"
143
+ ]
144
+ }
145
+ ],
146
+ "source": [
147
+ "# -*- coding: utf-8 -*-\n",
148
+ "\"\"\"🎬 Keras Video Classification CNN-RNN model\n",
149
+ "\n",
150
+ "Spaces for showing the model usage.\n",
151
+ "\n",
152
+ "Author:\n",
153
+ " - Thomas Chaigneau @ChainYo\n",
154
+ "\"\"\"\n",
155
+ "import os\n",
156
+ "import cv2\n",
157
+ "\n",
158
+ "import gradio as gr\n",
159
+ "import numpy as np\n",
160
+ "\n",
161
+ "from tensorflow import keras\n",
162
+ "\n",
163
+ "from tensorflow_docs.vis import embed\n",
164
+ "\n",
165
+ "from huggingface_hub import from_pretrained_keras"
166
+ ]
167
+ },
168
+ {
169
+ "cell_type": "code",
170
+ "execution_count": null,
171
+ "metadata": {
172
+ "executionInfo": {
173
+ "elapsed": 109,
174
+ "status": "ok",
175
+ "timestamp": 1744098315598,
176
+ "user": {
177
+ "displayName": "Lan Hoang",
178
+ "userId": "15367629887304430933"
179
+ },
180
+ "user_tz": -420
181
+ },
182
+ "id": "2l8ZXA4-XD_-"
183
+ },
184
+ "outputs": [
185
+ {
186
+ "name": "stderr",
187
+ "output_type": "stream",
188
+ "text": [
189
+ "A subdirectory or file Samples already exists.\n"
190
+ ]
191
+ }
192
+ ],
193
+ "source": [
194
+ "%mkdir Samples"
195
+ ]
196
+ },
197
+ {
198
+ "cell_type": "code",
199
+ "execution_count": 18,
200
+ "metadata": {
201
+ "colab": {
202
+ "base_uri": "https://localhost:8080/",
203
+ "height": 677,
204
+ "referenced_widgets": [
205
+ "7953decbcc66485d9e477a9d45777f99",
206
+ "f8709807dedf4e349dd9d463db6abac7",
207
+ "53a737a6c2794edcbd2d454273753b8b",
208
+ "5b22eb49678f44c09cf7e5a6007133de",
209
+ "32364e76989b40b39659c3f93e727fde",
210
+ "cb23ba857ca548e88967a0ef2d3f32e4",
211
+ "70331cb723934c37abd7a1815a969841",
212
+ "7c618cdc320f4cb3909fe11d4124cd85",
213
+ "d9fb30527f9a4bed9896bf89cbd4eb54",
214
+ "fd8b65aab251427bac9380d843ad02e1",
215
+ "9a3b710001334adc8684c2227e2b556b"
216
+ ]
217
+ },
218
+ "executionInfo": {
219
+ "elapsed": 9546,
220
+ "status": "ok",
221
+ "timestamp": 1744103445447,
222
+ "user": {
223
+ "displayName": "Lan Hoang",
224
+ "userId": "15367629887304430933"
225
+ },
226
+ "user_tz": -420
227
+ },
228
+ "id": "G_1_pfzxlXSo",
229
+ "outputId": "f1b2fb37-45b1-450b-ff8e-09fcb6f8ee55"
230
+ },
231
+ "outputs": [
232
+ {
233
+ "data": {
234
+ "application/vnd.jupyter.widget-view+json": {
235
+ "model_id": "7953decbcc66485d9e477a9d45777f99",
236
+ "version_major": 2,
237
+ "version_minor": 0
238
+ },
239
+ "text/plain": [
240
+ "Fetching 11 files: 0%| | 0/11 [00:00<?, ?it/s]"
241
+ ]
242
+ },
243
+ "metadata": {},
244
+ "output_type": "display_data"
245
+ },
246
+ {
247
+ "name": "stdout",
248
+ "output_type": "stream",
249
+ "text": [
250
+ "Running Gradio in a Colab notebook requires sharing enabled. Automatically setting `share=True` (you can turn this off by setting `share=False` in `launch()` explicitly).\n",
251
+ "\n",
252
+ "Colab notebook detected. To show errors in colab notebook, set debug=True in launch()\n",
253
+ "* Running on public URL: https://06f7718827d3f617a0.gradio.live\n",
254
+ "\n",
255
+ "This share link expires in 72 hours. For free permanent hosting and GPU upgrades, run `gradio deploy` from the terminal in the working directory to deploy to Hugging Face Spaces (https://huggingface.co/spaces)\n"
256
+ ]
257
+ },
258
+ {
259
+ "data": {
260
+ "text/html": [
261
+ "<div><iframe src=\"https://06f7718827d3f617a0.gradio.live\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"
262
+ ],
263
+ "text/plain": [
264
+ "<IPython.core.display.HTML object>"
265
+ ]
266
+ },
267
+ "metadata": {},
268
+ "output_type": "display_data"
269
+ },
270
+ {
271
+ "data": {
272
+ "text/plain": []
273
+ },
274
+ "execution_count": 18,
275
+ "metadata": {},
276
+ "output_type": "execute_result"
277
+ }
278
+ ],
279
+ "source": [
280
+ "# Kích thước ảnh đầu vào và số lượng đặc trưng\n",
281
+ "IMG_SIZE = 224\n",
282
+ "NUM_FEATURES = 2048\n",
283
+ "\n",
284
+ "# Tải mô hình CNN-RNN từ HuggingFace\n",
285
+ "model = from_pretrained_keras(\"keras-io/video-classification-cnn-rnn\")\n",
286
+ "\n",
287
+ "# Tạo danh sách video ví dụ từ thư mục Samples\n",
288
+ "samples = []\n",
289
+ "for file in os.listdir(\"Samples\"):\n",
290
+ " tag = file.split(\"_\")[1]\n",
291
+ " samples.append([f\"samples/{file}\"])\n",
292
+ "\n",
293
+ "# Cắt phần hình vuông ở trung tâm frame\n",
294
+ "def crop_center_square(frame):\n",
295
+ " y, x = frame.shape[0:2]\n",
296
+ " min_dim = min(y, x)\n",
297
+ " start_x = (x // 2) - (min_dim // 2)\n",
298
+ " start_y = (y // 2) - (min_dim // 2)\n",
299
+ " return frame[start_y : start_y + min_dim, start_x : start_x + min_dim]\n",
300
+ "\n",
301
+ "# Đọc video và xử lý từng frame\n",
302
+ "def load_video(path, max_frames=0, resize=(IMG_SIZE, IMG_SIZE)):\n",
303
+ " cap = cv2.VideoCapture(path)\n",
304
+ " frames = []\n",
305
+ " try:\n",
306
+ " while True:\n",
307
+ " ret, frame = cap.read()\n",
308
+ " if not ret:\n",
309
+ " break\n",
310
+ " frame = crop_center_square(frame)\n",
311
+ " frame = cv2.resize(frame, resize)\n",
312
+ " frame = frame[:, :, [2, 1, 0]]\n",
313
+ " frames.append(frame)\n",
314
+ "\n",
315
+ " if len(frames) == max_frames:\n",
316
+ " break\n",
317
+ " finally:\n",
318
+ " cap.release()\n",
319
+ " return np.array(frames)\n",
320
+ "\n",
321
+ "# Xây dựng mô hình trích xuất đặc trưng (InceptionV3)\n",
322
+ "def build_feature_extractor():\n",
323
+ " feature_extractor = keras.applications.InceptionV3(\n",
324
+ " weights=\"imagenet\",\n",
325
+ " include_top=False,\n",
326
+ " pooling=\"avg\",\n",
327
+ " input_shape=(IMG_SIZE, IMG_SIZE, 3),\n",
328
+ " )\n",
329
+ " preprocess_input = keras.applications.inception_v3.preprocess_input\n",
330
+ "\n",
331
+ " inputs = keras.Input((IMG_SIZE, IMG_SIZE, 3))\n",
332
+ " preprocessed = preprocess_input(inputs)\n",
333
+ "\n",
334
+ " outputs = feature_extractor(preprocessed)\n",
335
+ " return keras.Model(inputs, outputs, name=\"feature_extractor\")\n",
336
+ "\n",
337
+ "# Tạo feature extractor 1 lần\n",
338
+ "feature_extractor = build_feature_extractor()\n",
339
+ "\n",
340
+ "# Trích xuất đặc trưng cho từng frame của video\n",
341
+ "def prepare_video(frames, max_seq_length: int = 20):\n",
342
+ " frames = frames[None, ...]\n",
343
+ " frame_mask = np.zeros(shape=(1, max_seq_length,), dtype=\"bool\")\n",
344
+ " frame_features = np.zeros(shape=(1, max_seq_length, NUM_FEATURES), dtype=\"float32\")\n",
345
+ "\n",
346
+ " for i, batch in enumerate(frames):\n",
347
+ " video_length = batch.shape[0]\n",
348
+ " length = min(max_seq_length, video_length)\n",
349
+ " for j in range(length):\n",
350
+ " frame_features[i, j, :] = feature_extractor.predict(batch[None, j, :])\n",
351
+ " frame_mask[i, :length] = 1 # 1 = not masked, 0 = masked\n",
352
+ "\n",
353
+ " return frame_features, frame_mask\n",
354
+ "\n",
355
+ "# Dự đoán hành động từ video\n",
356
+ "def sequence_prediction(path):\n",
357
+ " class_vocab = [\"CricketShot\", \"PlayingCello\", \"Punch\", \"ShavingBeard\", \"TennisSwing\"]\n",
358
+ "\n",
359
+ " frames = load_video(path)\n",
360
+ " frame_features, frame_mask = prepare_video(frames)\n",
361
+ " probabilities = model.predict([frame_features, frame_mask])[0]\n",
362
+ "\n",
363
+ " preds = {}\n",
364
+ " for i in np.argsort(probabilities)[::-1]:\n",
365
+ " preds[class_vocab[i]] = float(probabilities[i])\n",
366
+ " return preds\n",
367
+ "\n",
368
+ "# HTML mô tả bên dưới app\n",
369
+ "article = article = \"<div style='text-align: center;'><a href='https://github.com/ChainYo' target='_blank'>Space by Thomas Chaigneau</a><br><a href='https://keras.io/examples/vision/video_classification/' target='_blank'>Keras example by Sayak Paul</a></div>\"\n",
370
+ "\n",
371
+ "# Tạo giao diện Gradio\n",
372
+ "app = gr.Interface(\n",
373
+ " fn=sequence_prediction,\n",
374
+ " inputs=[gr.Video(label=\"Video\")],\n",
375
+ " outputs=gr.Label(label=\"Prediction\"),\n",
376
+ " title=\"Keras Video Classification with CNN-RNN\",\n",
377
+ " description=\"Video classification demo using CNN-RNN based model.\",\n",
378
+ " article=article,\n",
379
+ " examples=samples\n",
380
+ ")\n",
381
+ "\n",
382
+ "# Khởi chạy ứng dụng\n",
383
+ "app.launch()\n",
384
+ "\n"
385
+ ]
386
+ }
387
+ ],
388
+ "metadata": {
389
+ "colab": {
390
+ "authorship_tag": "ABX9TyNo8ELbXbt4Di76kKEeSLhJ",
391
+ "provenance": []
392
+ },
393
+ "kernelspec": {
394
+ "display_name": "Python 3",
395
+ "name": "python3"
396
+ },
397
+ "language_info": {
398
+ "codemirror_mode": {
399
+ "name": "ipython",
400
+ "version": 3
401
+ },
402
+ "file_extension": ".py",
403
+ "mimetype": "text/x-python",
404
+ "name": "python",
405
+ "nbconvert_exporter": "python",
406
+ "pygments_lexer": "ipython3",
407
+ "version": "3.10.5"
408
+ },
409
+ "widgets": {
410
+ "application/vnd.jupyter.widget-state+json": {
411
+ "32364e76989b40b39659c3f93e727fde": {
412
+ "model_module": "@jupyter-widgets/base",
413
+ "model_module_version": "1.2.0",
414
+ "model_name": "LayoutModel",
415
+ "state": {
416
+ "_model_module": "@jupyter-widgets/base",
417
+ "_model_module_version": "1.2.0",
418
+ "_model_name": "LayoutModel",
419
+ "_view_count": null,
420
+ "_view_module": "@jupyter-widgets/base",
421
+ "_view_module_version": "1.2.0",
422
+ "_view_name": "LayoutView",
423
+ "align_content": null,
424
+ "align_items": null,
425
+ "align_self": null,
426
+ "border": null,
427
+ "bottom": null,
428
+ "display": null,
429
+ "flex": null,
430
+ "flex_flow": null,
431
+ "grid_area": null,
432
+ "grid_auto_columns": null,
433
+ "grid_auto_flow": null,
434
+ "grid_auto_rows": null,
435
+ "grid_column": null,
436
+ "grid_gap": null,
437
+ "grid_row": null,
438
+ "grid_template_areas": null,
439
+ "grid_template_columns": null,
440
+ "grid_template_rows": null,
441
+ "height": null,
442
+ "justify_content": null,
443
+ "justify_items": null,
444
+ "left": null,
445
+ "margin": null,
446
+ "max_height": null,
447
+ "max_width": null,
448
+ "min_height": null,
449
+ "min_width": null,
450
+ "object_fit": null,
451
+ "object_position": null,
452
+ "order": null,
453
+ "overflow": null,
454
+ "overflow_x": null,
455
+ "overflow_y": null,
456
+ "padding": null,
457
+ "right": null,
458
+ "top": null,
459
+ "visibility": null,
460
+ "width": null
461
+ }
462
+ },
463
+ "53a737a6c2794edcbd2d454273753b8b": {
464
+ "model_module": "@jupyter-widgets/controls",
465
+ "model_module_version": "1.5.0",
466
+ "model_name": "FloatProgressModel",
467
+ "state": {
468
+ "_dom_classes": [],
469
+ "_model_module": "@jupyter-widgets/controls",
470
+ "_model_module_version": "1.5.0",
471
+ "_model_name": "FloatProgressModel",
472
+ "_view_count": null,
473
+ "_view_module": "@jupyter-widgets/controls",
474
+ "_view_module_version": "1.5.0",
475
+ "_view_name": "ProgressView",
476
+ "bar_style": "success",
477
+ "description": "",
478
+ "description_tooltip": null,
479
+ "layout": "IPY_MODEL_7c618cdc320f4cb3909fe11d4124cd85",
480
+ "max": 11,
481
+ "min": 0,
482
+ "orientation": "horizontal",
483
+ "style": "IPY_MODEL_d9fb30527f9a4bed9896bf89cbd4eb54",
484
+ "value": 11
485
+ }
486
+ },
487
+ "5b22eb49678f44c09cf7e5a6007133de": {
488
+ "model_module": "@jupyter-widgets/controls",
489
+ "model_module_version": "1.5.0",
490
+ "model_name": "HTMLModel",
491
+ "state": {
492
+ "_dom_classes": [],
493
+ "_model_module": "@jupyter-widgets/controls",
494
+ "_model_module_version": "1.5.0",
495
+ "_model_name": "HTMLModel",
496
+ "_view_count": null,
497
+ "_view_module": "@jupyter-widgets/controls",
498
+ "_view_module_version": "1.5.0",
499
+ "_view_name": "HTMLView",
500
+ "description": "",
501
+ "description_tooltip": null,
502
+ "layout": "IPY_MODEL_fd8b65aab251427bac9380d843ad02e1",
503
+ "placeholder": "​",
504
+ "style": "IPY_MODEL_9a3b710001334adc8684c2227e2b556b",
505
+ "value": " 11/11 [00:00&lt;00:00, 569.14it/s]"
506
+ }
507
+ },
508
+ "70331cb723934c37abd7a1815a969841": {
509
+ "model_module": "@jupyter-widgets/controls",
510
+ "model_module_version": "1.5.0",
511
+ "model_name": "DescriptionStyleModel",
512
+ "state": {
513
+ "_model_module": "@jupyter-widgets/controls",
514
+ "_model_module_version": "1.5.0",
515
+ "_model_name": "DescriptionStyleModel",
516
+ "_view_count": null,
517
+ "_view_module": "@jupyter-widgets/base",
518
+ "_view_module_version": "1.2.0",
519
+ "_view_name": "StyleView",
520
+ "description_width": ""
521
+ }
522
+ },
523
+ "7953decbcc66485d9e477a9d45777f99": {
524
+ "model_module": "@jupyter-widgets/controls",
525
+ "model_module_version": "1.5.0",
526
+ "model_name": "HBoxModel",
527
+ "state": {
528
+ "_dom_classes": [],
529
+ "_model_module": "@jupyter-widgets/controls",
530
+ "_model_module_version": "1.5.0",
531
+ "_model_name": "HBoxModel",
532
+ "_view_count": null,
533
+ "_view_module": "@jupyter-widgets/controls",
534
+ "_view_module_version": "1.5.0",
535
+ "_view_name": "HBoxView",
536
+ "box_style": "",
537
+ "children": [
538
+ "IPY_MODEL_f8709807dedf4e349dd9d463db6abac7",
539
+ "IPY_MODEL_53a737a6c2794edcbd2d454273753b8b",
540
+ "IPY_MODEL_5b22eb49678f44c09cf7e5a6007133de"
541
+ ],
542
+ "layout": "IPY_MODEL_32364e76989b40b39659c3f93e727fde"
543
+ }
544
+ },
545
+ "7c618cdc320f4cb3909fe11d4124cd85": {
546
+ "model_module": "@jupyter-widgets/base",
547
+ "model_module_version": "1.2.0",
548
+ "model_name": "LayoutModel",
549
+ "state": {
550
+ "_model_module": "@jupyter-widgets/base",
551
+ "_model_module_version": "1.2.0",
552
+ "_model_name": "LayoutModel",
553
+ "_view_count": null,
554
+ "_view_module": "@jupyter-widgets/base",
555
+ "_view_module_version": "1.2.0",
556
+ "_view_name": "LayoutView",
557
+ "align_content": null,
558
+ "align_items": null,
559
+ "align_self": null,
560
+ "border": null,
561
+ "bottom": null,
562
+ "display": null,
563
+ "flex": null,
564
+ "flex_flow": null,
565
+ "grid_area": null,
566
+ "grid_auto_columns": null,
567
+ "grid_auto_flow": null,
568
+ "grid_auto_rows": null,
569
+ "grid_column": null,
570
+ "grid_gap": null,
571
+ "grid_row": null,
572
+ "grid_template_areas": null,
573
+ "grid_template_columns": null,
574
+ "grid_template_rows": null,
575
+ "height": null,
576
+ "justify_content": null,
577
+ "justify_items": null,
578
+ "left": null,
579
+ "margin": null,
580
+ "max_height": null,
581
+ "max_width": null,
582
+ "min_height": null,
583
+ "min_width": null,
584
+ "object_fit": null,
585
+ "object_position": null,
586
+ "order": null,
587
+ "overflow": null,
588
+ "overflow_x": null,
589
+ "overflow_y": null,
590
+ "padding": null,
591
+ "right": null,
592
+ "top": null,
593
+ "visibility": null,
594
+ "width": null
595
+ }
596
+ },
597
+ "9a3b710001334adc8684c2227e2b556b": {
598
+ "model_module": "@jupyter-widgets/controls",
599
+ "model_module_version": "1.5.0",
600
+ "model_name": "DescriptionStyleModel",
601
+ "state": {
602
+ "_model_module": "@jupyter-widgets/controls",
603
+ "_model_module_version": "1.5.0",
604
+ "_model_name": "DescriptionStyleModel",
605
+ "_view_count": null,
606
+ "_view_module": "@jupyter-widgets/base",
607
+ "_view_module_version": "1.2.0",
608
+ "_view_name": "StyleView",
609
+ "description_width": ""
610
+ }
611
+ },
612
+ "cb23ba857ca548e88967a0ef2d3f32e4": {
613
+ "model_module": "@jupyter-widgets/base",
614
+ "model_module_version": "1.2.0",
615
+ "model_name": "LayoutModel",
616
+ "state": {
617
+ "_model_module": "@jupyter-widgets/base",
618
+ "_model_module_version": "1.2.0",
619
+ "_model_name": "LayoutModel",
620
+ "_view_count": null,
621
+ "_view_module": "@jupyter-widgets/base",
622
+ "_view_module_version": "1.2.0",
623
+ "_view_name": "LayoutView",
624
+ "align_content": null,
625
+ "align_items": null,
626
+ "align_self": null,
627
+ "border": null,
628
+ "bottom": null,
629
+ "display": null,
630
+ "flex": null,
631
+ "flex_flow": null,
632
+ "grid_area": null,
633
+ "grid_auto_columns": null,
634
+ "grid_auto_flow": null,
635
+ "grid_auto_rows": null,
636
+ "grid_column": null,
637
+ "grid_gap": null,
638
+ "grid_row": null,
639
+ "grid_template_areas": null,
640
+ "grid_template_columns": null,
641
+ "grid_template_rows": null,
642
+ "height": null,
643
+ "justify_content": null,
644
+ "justify_items": null,
645
+ "left": null,
646
+ "margin": null,
647
+ "max_height": null,
648
+ "max_width": null,
649
+ "min_height": null,
650
+ "min_width": null,
651
+ "object_fit": null,
652
+ "object_position": null,
653
+ "order": null,
654
+ "overflow": null,
655
+ "overflow_x": null,
656
+ "overflow_y": null,
657
+ "padding": null,
658
+ "right": null,
659
+ "top": null,
660
+ "visibility": null,
661
+ "width": null
662
+ }
663
+ },
664
+ "d9fb30527f9a4bed9896bf89cbd4eb54": {
665
+ "model_module": "@jupyter-widgets/controls",
666
+ "model_module_version": "1.5.0",
667
+ "model_name": "ProgressStyleModel",
668
+ "state": {
669
+ "_model_module": "@jupyter-widgets/controls",
670
+ "_model_module_version": "1.5.0",
671
+ "_model_name": "ProgressStyleModel",
672
+ "_view_count": null,
673
+ "_view_module": "@jupyter-widgets/base",
674
+ "_view_module_version": "1.2.0",
675
+ "_view_name": "StyleView",
676
+ "bar_color": null,
677
+ "description_width": ""
678
+ }
679
+ },
680
+ "f8709807dedf4e349dd9d463db6abac7": {
681
+ "model_module": "@jupyter-widgets/controls",
682
+ "model_module_version": "1.5.0",
683
+ "model_name": "HTMLModel",
684
+ "state": {
685
+ "_dom_classes": [],
686
+ "_model_module": "@jupyter-widgets/controls",
687
+ "_model_module_version": "1.5.0",
688
+ "_model_name": "HTMLModel",
689
+ "_view_count": null,
690
+ "_view_module": "@jupyter-widgets/controls",
691
+ "_view_module_version": "1.5.0",
692
+ "_view_name": "HTMLView",
693
+ "description": "",
694
+ "description_tooltip": null,
695
+ "layout": "IPY_MODEL_cb23ba857ca548e88967a0ef2d3f32e4",
696
+ "placeholder": "​",
697
+ "style": "IPY_MODEL_70331cb723934c37abd7a1815a969841",
698
+ "value": "Fetching 11 files: 100%"
699
+ }
700
+ },
701
+ "fd8b65aab251427bac9380d843ad02e1": {
702
+ "model_module": "@jupyter-widgets/base",
703
+ "model_module_version": "1.2.0",
704
+ "model_name": "LayoutModel",
705
+ "state": {
706
+ "_model_module": "@jupyter-widgets/base",
707
+ "_model_module_version": "1.2.0",
708
+ "_model_name": "LayoutModel",
709
+ "_view_count": null,
710
+ "_view_module": "@jupyter-widgets/base",
711
+ "_view_module_version": "1.2.0",
712
+ "_view_name": "LayoutView",
713
+ "align_content": null,
714
+ "align_items": null,
715
+ "align_self": null,
716
+ "border": null,
717
+ "bottom": null,
718
+ "display": null,
719
+ "flex": null,
720
+ "flex_flow": null,
721
+ "grid_area": null,
722
+ "grid_auto_columns": null,
723
+ "grid_auto_flow": null,
724
+ "grid_auto_rows": null,
725
+ "grid_column": null,
726
+ "grid_gap": null,
727
+ "grid_row": null,
728
+ "grid_template_areas": null,
729
+ "grid_template_columns": null,
730
+ "grid_template_rows": null,
731
+ "height": null,
732
+ "justify_content": null,
733
+ "justify_items": null,
734
+ "left": null,
735
+ "margin": null,
736
+ "max_height": null,
737
+ "max_width": null,
738
+ "min_height": null,
739
+ "min_width": null,
740
+ "object_fit": null,
741
+ "object_position": null,
742
+ "order": null,
743
+ "overflow": null,
744
+ "overflow_x": null,
745
+ "overflow_y": null,
746
+ "padding": null,
747
+ "right": null,
748
+ "top": null,
749
+ "visibility": null,
750
+ "width": null
751
+ }
752
+ }
753
+ }
754
+ }
755
+ },
756
+ "nbformat": 4,
757
+ "nbformat_minor": 0
758
+ }
app.py ADDED
@@ -0,0 +1,126 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# -*- coding: utf-8 -*-
"""🎬 Keras Video Classification CNN-RNN model

Spaces for showing the model usage.

Author:
    - Thomas Chaigneau @ChainYo
"""
import os
import cv2

import gradio as gr
import numpy as np

from tensorflow import keras

from tensorflow_docs.vis import embed

from huggingface_hub import from_pretrained_keras

# Input frame size fed to the feature extractor and the embedding width
# produced by InceptionV3's average-pooled output.
IMG_SIZE = 224
NUM_FEATURES = 2048

# Load the pretrained CNN-RNN classifier from the HuggingFace Hub.
model = from_pretrained_keras("keras-io/video-classification-cnn-rnn")

# Build the list of example videos from the Samples directory.
# BUG FIX: the example paths must match the on-disk directory name
# ("Samples", capital S) — the previous lowercase "samples/" prefix broke
# the Gradio examples on case-sensitive filesystems.
samples = [[os.path.join("Samples", name)] for name in os.listdir("Samples")]
+
34
+ # Cắt phần hình vuông ở trung tâm frame
35
+ def crop_center_square(frame):
36
+ y, x = frame.shape[0:2]
37
+ min_dim = min(y, x)
38
+ start_x = (x // 2) - (min_dim // 2)
39
+ start_y = (y // 2) - (min_dim // 2)
40
+ return frame[start_y : start_y + min_dim, start_x : start_x + min_dim]
41
+
42
+ # Đọc video và xử lý từng frame
43
+ def load_video(path, max_frames=0, resize=(IMG_SIZE, IMG_SIZE)):
44
+ cap = cv2.VideoCapture(path)
45
+ frames = []
46
+ try:
47
+ while True:
48
+ ret, frame = cap.read()
49
+ if not ret:
50
+ break
51
+ frame = crop_center_square(frame)
52
+ frame = cv2.resize(frame, resize)
53
+ frame = frame[:, :, [2, 1, 0]]
54
+ frames.append(frame)
55
+
56
+ if len(frames) == max_frames:
57
+ break
58
+ finally:
59
+ cap.release()
60
+ return np.array(frames)
61
+
62
+ # Xây dựng mô hình trích xuất đặc trưng (InceptionV3)
63
+ def build_feature_extractor():
64
+ feature_extractor = keras.applications.InceptionV3(
65
+ weights="imagenet",
66
+ include_top=False,
67
+ pooling="avg",
68
+ input_shape=(IMG_SIZE, IMG_SIZE, 3),
69
+ )
70
+ preprocess_input = keras.applications.inception_v3.preprocess_input
71
+
72
+ inputs = keras.Input((IMG_SIZE, IMG_SIZE, 3))
73
+ preprocessed = preprocess_input(inputs)
74
+
75
+ outputs = feature_extractor(preprocessed)
76
+ return keras.Model(inputs, outputs, name="feature_extractor")
77
+
78
+ # Tạo feature extractor 1 lần
79
+ feature_extractor = build_feature_extractor()
80
+
81
+ # Trích xuất đặc trưng cho từng frame của video
82
+ def prepare_video(frames, max_seq_length: int = 20):
83
+ frames = frames[None, ...]
84
+ frame_mask = np.zeros(shape=(1, max_seq_length,), dtype="bool")
85
+ frame_features = np.zeros(shape=(1, max_seq_length, NUM_FEATURES), dtype="float32")
86
+
87
+ for i, batch in enumerate(frames):
88
+ video_length = batch.shape[0]
89
+ length = min(max_seq_length, video_length)
90
+ for j in range(length):
91
+ frame_features[i, j, :] = feature_extractor.predict(batch[None, j, :])
92
+ frame_mask[i, :length] = 1 # 1 = not masked, 0 = masked
93
+
94
+ return frame_features, frame_mask
95
+
96
+ # Dự đoán hành động từ video
97
+ def sequence_prediction(path):
98
+ class_vocab = ["CricketShot", "PlayingCello", "Punch", "ShavingBeard", "TennisSwing"]
99
+
100
+ frames = load_video(path)
101
+ frame_features, frame_mask = prepare_video(frames)
102
+ probabilities = model.predict([frame_features, frame_mask])[0]
103
+
104
+ preds = {}
105
+ for i in np.argsort(probabilities)[::-1]:
106
+ preds[class_vocab[i]] = float(probabilities[i])
107
+ return preds
108
+
109
+ # HTML mô tả bên dưới app
110
+ article = article = "<div style='text-align: center;'><a href='https://github.com/ChainYo' target='_blank'>Space by Thomas Chaigneau</a><br><a href='https://keras.io/examples/vision/video_classification/' target='_blank'>Keras example by Sayak Paul</a></div>"
111
+
112
+ # Tạo giao diện Gradio
113
+ app = gr.Interface(
114
+ fn=sequence_prediction,
115
+ inputs=[gr.Video(label="Video")],
116
+ outputs=gr.Label(label="Prediction"),
117
+ title="Keras Video Classification with CNN-RNN",
118
+ description="Video classification demo using CNN-RNN based model.",
119
+ article=article,
120
+ examples=samples
121
+ )
122
+
123
+ # Khởi chạy ứng dụng
124
+ app.launch()
125
+
126
+
gitattributes.txt ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bin.* filter=lfs diff=lfs merge=lfs -text
5
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.model filter=lfs diff=lfs merge=lfs -text
12
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
13
+ *.onnx filter=lfs diff=lfs merge=lfs -text
14
+ *.ot filter=lfs diff=lfs merge=lfs -text
15
+ *.parquet filter=lfs diff=lfs merge=lfs -text
16
+ *.pb filter=lfs diff=lfs merge=lfs -text
17
+ *.pt filter=lfs diff=lfs merge=lfs -text
18
+ *.pth filter=lfs diff=lfs merge=lfs -text
19
+ *.rar filter=lfs diff=lfs merge=lfs -text
20
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
21
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
22
+ *.tflite filter=lfs diff=lfs merge=lfs -text
23
+ *.tgz filter=lfs diff=lfs merge=lfs -text
24
+ *.xz filter=lfs diff=lfs merge=lfs -text
25
+ *.zip filter=lfs diff=lfs merge=lfs -text
26
+ *.zstandard filter=lfs diff=lfs merge=lfs -text
27
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
requirements.txt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ opencv-python-headless
2
+ tensorflow
3
+ git+https://github.com/tensorflow/docs