<!DOCTYPE html>
<html>
<head>
<meta name="viewport" content="width=device-width">
<meta charset="utf-8">
<!-- Meta tags for social media banners -->
<meta name="description" content="Enhancing Gait Video Analysis in Neurodegenerative Diseases by Knowledge Augmentation in Vision Language Model (MICCAI 2024)">
<meta property="og:title" content="Enhancing Gait Video Analysis in Neurodegenerative Diseases by Knowledge Augmentation in Vision Language Model"/>
<meta property="og:description" content="A knowledge augmentation strategy for assessing diagnostic groups and gait impairment from monocular gait videos with a pre-trained vision language model."/>
<meta property="og:url" content="URL OF THE WEBSITE"/>
<!-- Path to the banner image; optimal dimensions are 1200x630 -->
<meta property="og:image" content="static/images/your_banner_image.png" />
<meta property="og:image:width" content="1200"/>
<meta property="og:image:height" content="630"/>
<meta name="twitter:title" content="TWITTER BANNER TITLE META TAG">
<meta name="twitter:description" content="TWITTER BANNER DESCRIPTION META TAG">
<!-- Path to the Twitter banner image; optimal dimensions are 1200x600 -->
<meta name="twitter:image" content="static/images/your_twitter_banner_image.png">
<meta name="twitter:card" content="summary_large_image">
<!-- Keywords for your paper to be indexed by -->
<meta name="keywords" content="gait analysis, vision language model, neurodegenerative diseases, knowledge augmentation, prompt tuning">
<meta name="viewport" content="width=device-width, initial-scale=1">
<title>Enhancing Gait Video Analysis in Neurodegenerative Diseases by Knowledge Augmentation in Vision Language Model</title>
<link rel="icon" type="image/x-icon" href="static/images/icube.ico">
<link href="https://fonts.googleapis.com/css?family=Google+Sans|Noto+Sans|Castoro"
rel="stylesheet">
<link rel="stylesheet" href="static/css/bulma.min.css">
<link rel="stylesheet" href="static/css/bulma-carousel.min.css">
<link rel="stylesheet" href="static/css/bulma-slider.min.css">
<link rel="stylesheet" href="static/css/fontawesome.all.min.css">
<link rel="stylesheet"
href="https://cdn.jsdelivr.net/gh/jpswalsh/academicons@1/css/academicons.min.css">
<link rel="stylesheet" href="static/css/index.css">
<script src="https://ajax.googleapis.com/ajax/libs/jquery/3.5.1/jquery.min.js"></script>
<script src="https://documentcloud.adobe.com/view-sdk/main.js"></script>
<script defer src="static/js/fontawesome.all.min.js"></script>
<script src="static/js/bulma-carousel.min.js"></script>
<script src="static/js/bulma-slider.min.js"></script>
<script src="static/js/index.js"></script>
</head>
<body>
<style>
div {
text-align: center;
}
</style>
<section class="hero">
<div class="hero-body">
<div class="container is-max-desktop">
<div class="columns is-centered">
<div class="column has-text-centered">
<h1 class="title is-1 publication-title">Enhancing Gait Video Analysis in Neurodegenerative Diseases by Knowledge Augmentation in Vision Language Model</h1>
<div class="is-size-5 publication-authors">
<!-- Paper authors -->
<span class="author-block">
Diwei Wang<sup>1</sup>, </span>
<span class="author-block">
<a href="https://flaick.github.io/" target="_blank">Kun Yuan<sup>1</sup></a>, </span>
<span class="author-block">
Candice Muller<sup>2</sup>, </span>
<span class="author-block">
<a href="https://www.chru-strasbourg.fr/praticien/blanc-frederic/" target="_blank">Frédéric Blanc<sup>1,2</sup></a>, </span>
<span class="author-block">
<a href="https://camma.unistra.fr/npadoy/" target="_blank">Nicolas Padoy<sup>1</sup></a>, and</span>
<span class="author-block">
<a href="https://igg.unistra.fr/People/seo/Home.html" target="_blank">Hyewon Seo<sup>1</sup></a>
</span>
</div>
<div class="is-size-5 publication-authors">
<span class="author-block"><sup>1</sup> ICube laboratory, University of Strasbourg, CNRS, France</span>
<br><span class="author-block"><sup>2</sup> Hôpital de la Robertsau, France</span>
<!-- <span class="eql-cntrb"><small><br><sup>*</sup>Indicates Equal Contribution</small></span> -->
</div>
<div class="imageflex">
<img src="static/images/miccai2024-logo.png" width="200">
</div>
<div class="column has-text-centered">
<div class="publication-links">
<!-- Arxiv PDF link -->
<span class="link-block">
<a href="static/pdfs/Camera-ready_paper_2283.pdf" target="_blank"
class="external-link button is-normal is-rounded is-dark">
<span class="icon">
<i class="fas fa-file-pdf"></i>
</span>
<span>Paper</span>
</a>
</span>
<!-- Supplementary PDF link -->
<span class="link-block">
<a href="static/pdfs/supplementary_material.pdf" target="_blank"
class="external-link button is-normal is-rounded is-dark">
<span class="icon">
<i class="fas fa-file-pdf"></i>
</span>
<span>Supplementary</span>
</a>
</span>
<!-- Poster PDF link -->
<span class="link-block">
<a href="static/pdfs/poster.pdf" target="_blank"
class="external-link button is-normal is-rounded is-dark">
<span class="icon">
<i class="fas fa-file-pdf"></i>
</span>
<span>Poster</span>
</a>
</span>
<!-- Github link -->
<span class="link-block">
<a href="https://github.com/lisqzqng/GaitAnalysisVLM" target="_blank"
class="external-link button is-normal is-rounded is-dark">
<span class="icon">
<i class="fab fa-github"></i>
</span>
<span>Code</span>
</a>
</span>
<!-- ArXiv abstract Link -->
<!-- <span class="link-block">
<a href="https://arxiv.org/abs/<ARXIV PAPER ID>" target="_blank"
class="external-link button is-normal is-rounded is-dark">
<span class="icon">
<i class="ai ai-arxiv"></i>
</span>
<span>arXiv</span>
</a>
</span> -->
</div>
</div>
</div>
</div>
</div>
</div>
</section>
<!-- Paper abstract -->
<section class="section hero is-light">
<div class="container is-max-desktop">
<div class="columns is-centered has-text-centered">
<div class="column is-four-fifths">
<h2 class="title is-3">Abstract</h2>
<div class="content has-text-justified">
<p>
We present a knowledge augmentation strategy for assessing diagnostic groups and gait impairment from monocular gait videos. Built on a large-scale pre-trained Vision Language Model (VLM), our model learns and improves visual, textual, and numerical representations of patient gait videos through collective learning across three distinct modalities: gait videos, class-specific descriptions, and numerical gait parameters. Our contributions are two-fold. First, we adopt a knowledge-aware prompt tuning strategy that uses class-specific medical descriptions to guide the text prompt learning. Second, we integrate the paired gait parameters in the form of numerical texts to enhance the numeracy of the textual representation. Results demonstrate that our model not only significantly outperforms state-of-the-art methods in video-based classification tasks but also adeptly decodes the learned class-specific text features into natural language descriptions using the vocabulary of quantitative gait parameters.
</p>
</div>
</div>
</div>
</div>
</section>
<!-- End paper abstract -->
<!-- Image carousel -->
<section class="hero is-small">
<div class="hero-body">
<div class="container">
<div id="results-carousel" class="carousel results-carousel">
<div class="item">
<img src="static/images/fig1.png" alt="Overview of the cross-modality model for video-based clinical gait analysis" width="600"/>
<h2 class="subtitle has-text-centered">
Fig.1: Overview of our cross-modality model for video-based clinical gait analysis. The three colored blocks represent the text-encoding pipeline, the video-encoding pipeline, and the text embedding of numerical gait parameters, respectively.
</h2>
</div>
<div class="item">
<img src="static/images/fig2.png" alt="Translation of gait parameters into text" width="900"/>
<h2 class="subtitle has-text-centered">
Fig.2: Translation of gait parameters into text.
</h2>
</div>
<div class="item">
<img src="static/images/fig3.png" alt="Numerical text embedding process using the frozen CLIP text encoder" width="900"/>
<h2 class="subtitle has-text-centered">
Fig.3: Numerical text embedding process using the frozen CLIP text encoder.
</h2>
</div>
<div class="item">
<img src="static/images/fig4a.png" alt="UMAP visualization of original numerical text embeddings" width="450"/><img src="static/images/fig4b.png" alt="UMAP visualization of MLP-projected numerical text embeddings" width="450"/>
<h2 class="subtitle has-text-centered">
Fig.4: UMAP visualization (3 components) of numerical text embeddings from gait parameters: original embeddings on the left and MLP-projected embeddings on the right, with yellow points indicating learned per-class text features.
</h2>
</div>
</div>
</div>
</div>
</section>
<!-- End image carousel -->
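<!-- Numerical text embedding sketch -->
<section class="section" id="numerical-text-sketch">
<div class="container is-max-desktop content">
<h2 class="title is-3">Sketch: From Gait Parameters to a CLIP Text Embedding</h2>
<p>
The snippet below is a minimal, illustrative sketch of the pipeline shown in Figs. 2 and 3: numerical gait parameters are verbalized into a "numerical text" sentence, which a frozen CLIP text encoder maps to an embedding. It uses OpenAI's <code>clip</code> package; the parameter names, values, and sentence template are placeholders, not the exact ones used in the paper.
</p>
<pre><code># pip install torch git+https://github.com/openai/CLIP.git
import torch
import clip

device = "cuda" if torch.cuda.is_available() else "cpu"
model, _ = clip.load("ViT-B/32", device=device)  # text encoder stays frozen

# Illustrative gait parameters (names and values are placeholders).
gait_params = {"walking speed": 0.82, "cadence": 98.4, "step length": 0.41}

# Translate the numbers into a numerical text sentence (cf. Fig. 2).
sentence = ", ".join(f"the {name} is {value:.2f}" for name, value in gait_params.items())

# Embed the sentence with the frozen CLIP text encoder (cf. Fig. 3).
with torch.no_grad():
    tokens = clip.tokenize([sentence]).to(device)
    text_feature = model.encode_text(tokens)  # shape (1, 512) for ViT-B/32

print(sentence)
print(text_feature.shape)</code></pre>
</div>
</section>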
<!-- presentation video -->
<section class="hero is-small is-light">
<div class="hero-body">
<div class="container">
<!-- Paper video. -->
<h2 class="title is-3">Video Presentation</h2>
<div class="columns is-centered has-text-centered">
<div class="column is-four-fifths">
<div class="publication-video">
<!-- Video embed (Google Drive) -->
<iframe src="https://drive.google.com/file/d/1f98Db2H-foaJsmdSNwO1fuZ0hFbm_G-B/preview" frameborder="0" allow="autoplay; encrypted-media" allowfullscreen></iframe>
</div>
</div>
</div>
</div>
</div>
</section>
<!-- End presentation video -->
<!-- Interactive embeddings section, embedding './static/htmls/updrs.html' and './static/htmls/diag.html' via iframes -->
<section class="section">
<div class="container is-max-desktop">
<h2 class="title is-3">Numerical Text embeddings</h2>
<div class="content">
<p>
We provide interactive webpages to visualize the numerical text embeddings derived from gait parameters. The embeddings are reduced with UMAP (3 components), and the projections of the learned per-class text features are highlighted in yellow.
</p>
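<p>
As a rough sketch of how such a view can be produced (assuming the <code>umap-learn</code> and <code>plotly</code> packages; the input file name is illustrative), one reduces the embeddings to three components and exports an interactive HTML figure:
</p>
<pre><code># pip install umap-learn plotly numpy
import numpy as np
import umap
import plotly.express as px

# Placeholder input: one CLIP text embedding per gait-parameter sentence.
embeddings = np.load("numerical_text_embeddings.npy")  # shape (N, 512)

# Reduce to 3 components with UMAP, then export an interactive 3D scatter.
projection = umap.UMAP(n_components=3).fit_transform(embeddings)
fig = px.scatter_3d(x=projection[:, 0], y=projection[:, 1], z=projection[:, 2])
fig.write_html("static/htmls/updrs.html")  # e.g., the file embedded below</code></pre>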
<div class="columns is-centered">
<div class="interactive-webpage">
<iframe src="static/htmls/updrs.html" width="1200px" height="800px"></iframe>
</div>
</div>
<div class="columns is-centered">
<div class="interactive-webpage">
<iframe src="static/htmls/diag.html" width="1200px" height="800px"></iframe>
</div>
</div>
</div>
</div>
</section>
<!--BibTex citation -->
<section class="section" id="BibTeX">
<div class="container is-max-desktop content">
<h2 class="title">BibTeX</h2>
<!-- to make locally left-aligned -->
<style>
pre {
text-align: left;
}
</style>
<pre><code>@InProceedings{WangEnhancingMICCAI2024,
  author    = {Wang, Diwei and Yuan, Kun and Muller, Candice and Blanc, Frédéric and Padoy, Nicolas and Seo, Hyewon},
  title     = {Enhancing Gait Video Analysis in Neurodegenerative Diseases by Knowledge Augmentation in Vision Language Model},
  booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},
  year      = {2024},
  publisher = {Springer Nature Switzerland},
  volume    = {LNCS 15005},
  month     = {October},
  pages     = {pending},
}</code></pre>
</div>
</section>
<!--End BibTex citation -->
<footer class="footer">
<div class="container">
<div class="columns is-centered">
<div class="column is-8">
<div class="content">
<p>
This page was built using the <a href="https://github.com/eliahuhorwitz/Academic-project-page-template" target="_blank">Academic Project Page Template</a>, which was adapted from the <a href="https://nerfies.github.io" target="_blank">Nerfies</a> project page.
<br> This website is licensed under a <a rel="license" href="http://creativecommons.org/licenses/by-sa/4.0/" target="_blank">Creative
Commons Attribution-ShareAlike 4.0 International License</a>.
</p>
</div>
</div>
</div>
</div>
</footer>
</body>
</html>