# A Study of the Efficiency of Running Inference on Multiple Models Concurrently with the onnxruntime Library
<h2 id="1-背景">1. 背景</h2> <p><strong>需求</strong>:针对视频形式的数据输入,对每一帧图像,有多个神经网络模型需要进行推理并获得预测结果。如何让整个推理过程更加高效,尝试了几种不同的方案。</p> <p><strong>硬件</strong>:单显卡主机。</p> <h2 id="2-方案">2. 方案</h2> <p>由于存在多个模型需要推理,但模型之间没有相互依赖关系,因此很容易想到通过<strong>并行</strong>的方式来提高运行效率。</p> <p>对比了如下几种方案的结果,包括:</p> <ol> <li>串行</li> <li>线程</li> <li>进程</li> <li>协程</li> </ol> <h2 id="3-实现">3. 实现</h2> <h3 id="31-整体流程">3.1 整体流程</h3> <p>配置了 4 个体量相近的模型。<br /> 为了屏蔽读取和解码的时间消耗对最终结果的影响,提前读取视频并准备输入。<br /> 统计每个单独模型执行推理的累积时间,以及整体的运行时间。</p> <pre><code class="language-python">import asyncio from time import time def main(): frames = load_video() weights = load_weights() print('串行:') one_by_one(weights, frames) print('多线程:') multit_thread(weights, frames) print('多进程:') multi_process(weights, frames) print('协程:') asyncio.run(coroutine(weights, frames))</code></pre> <h3 id="32-串行">3.2 串行</h3> <p>读取到当前帧数据后,所有模型依次运行。</p> <pre><code class="language-python">def one_by_one(weights, frames): sessions = [init_session(weight) for weight in weights] costs = [[] for _ in range(len(weights))] since_infer = time() for frame in frames: for session in sessions: since = time() _ = session.run('output', {'input': frame}) cost = time() - since costs[idx].append(cost) print([sum(cost) for cost in costs]) print("infer:", time() - since_infer) return</code></pre> <h3 id="33-多线程">3.3 多线程</h3> <p>为每一个模型分配一个线程。</p> <pre><code class="language-python">from threading import Thread def multit_thread(weights, frames): sessions = [init_session(weight) for weight in weights] threads = [] since_infer = time() for session in sessions: thread = Thread(target=run_session_thread, args=(session, frames)) thread.start() threads.append(thread) for thread in threads: thread.join() print("infer:", time() - since_infer) return def run_session_thread(session, frames): costs = [] for frame in frames: since = time() _ = session.run('output', {'input': frame}) costs.append(time() - since) print(sum(costs)) return</code></pre> <h3 id="34-多进程">3.4 多进程</h3> <p>为每一个模型分配一个进程。<br /> 由于 session 不能在进程间传递,因此需要在每个进程的内部单独初始化。如果数据较多,这部分初始化的时间消耗基本可以忽略不急。</p> <pre><code class="language-python">from multiprocessing import Manager, Process def multi_process(weights, frames): inputs = Manager().list(frames) processes = [] since_infer = time() for weight in weights: process = Process(target=run_session_process, args=(weight, inputs)) process.start() processes.append(process) for process in processes: process.join() print("infer:", time() - since_infer) return def run_session_process(weight, frames): session = init_session(weight) costs = [] for frame in frames: since = time() _ = session.run('output', {'input': frame}) costs.append(time() - since) print(sum(costs)) return</code></pre> <h3 id="35-协程">3.5 协程</h3> <p>为每一个模型分配一个协程。</p> <pre><code class="language-python">async def coroutine(weights, frames): sessions = [init_session(weight) for weight in weights] since_infer = time() tasks = [ asyncio.create_task(run_session_coroutine(session, frames)) for session in sessions ] for task in tasks: await task print("infer:", time() - since_all) return async def run_session_coroutine(session, frames): costs = [] for frame in frames: since = time() _ = session.run('output', {'input': frame}) costs.append(time() - since) print(sum(costs)) return</code></pre> <h3 id="36-其他辅助函数">3.6 其他辅助函数</h3> <pre><code class="language-python">import cv2 import numpy as np import onnxruntime as ort def init_session(weight): provider = "CUDAExecutionProvider" session = ort.InferenceSession(weight, providers=[provider]) return session 
## 4. Results and analysis

### 4.1 Results

Running 1000 frames with `batch_size=4`, the results were:

| Approach | Serial | Threads | Processes | Coroutines |
| --- | --- | --- | --- | --- |
| Per-model cumulative time (s) | 7.9 / 5.3 / 5.2 / 5.2 | 13.5 / 13.5 / 15.6 / 15.7 | 13.5 / 13.8 / 13.7 / 13.6 | 6.5 / 5.2 / 5.3 / 5.3 |
| Total time (s) | 23.7 | 15.8 | 30.1 | 22.5 |
| GPU memory (MB) | 1280 | 1416 | 3375 | 1280 |
| Average **GPU-Util** | ~60% | ~85% | ~70% | ~55% |

- In this scenario, **threads** are the most efficient option overall: the shortest total time, reasonable GPU memory usage, and the highest GPU utilization.
- Serial execution is the baseline; its total time is simply the sum of the individual models' times.
- With processes, the per-model cumulative times are similar to the threaded run, but the total time increases noticeably and GPU memory usage grows substantially.
- Judged by the overall result, coroutines behave essentially the same as serial execution.

### 4.2 Analysis

#### 4.2.1 The thread-based approach

**Why do threads improve on serial execution?**

- The basic reading is that `session.run()` spends part of its time on the CPU and part on the GPU.
- In the serial scheme, the GPU sits idle while the CPU works, and vice versa.
- With threads, once one thread hands its work to the GPU and waits for the result, another thread's CPU portion can run in the meantime. (This overlap is possible despite Python's GIL because the blocking native call releases the GIL while it executes.)

#### 4.2.2 The process-based approach

**Why do processes actually reduce efficiency?**

- The basic reading is that the bottleneck is not the CPU-side computation but the forward pass of the models on the GPU.
- Extra processes therefore do not unlock more of the system: multiple CPU cores end up contending for the same GPU, and the scheduling overhead grows.

#### 4.2.3 The coroutine-based approach

**Why do coroutines look the same as serial execution?**

What the coroutine run actually looked like:

- The per-model cumulative times were printed one after another, roughly one model's cumulative time apart (in the thread and process runs, all four values were printed almost simultaneously, i.e. the models finished together).
- GPU memory usage grew step by step, eventually matching the serial run.

Likely cause:

- Switching between CPU and GPU work probably cannot trigger a coroutine switch, so the net effect is that one model finishes inference on all frames before the next model starts. (A sketch of one possible workaround follows at the end of this article.)

Is a coroutine approach needed at all?

- Moving from threads to coroutines would only be worthwhile to further reduce thread-switching overhead.
- In this scenario, the number of models that need to run concurrently is small, so creating the same number of threads keeps resource usage well under control.
- There is therefore no real need for coroutines here.

> Coroutines are new to me, so the conclusions above may simply reflect incorrect usage. Corrections are welcome.
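One follow-up to the coroutine analysis in 4.2.3 (again an addition to the original write-up, not something measured above): since `session.run()` is a blocking call that never awaits, the event loop gets no opportunity to switch between the four tasks. One way to let the coroutine variant actually overlap is to push the blocking call onto a worker thread with `asyncio.to_thread` (Python 3.9+), which in effect turns it back into the multithreaded approach:

```python
import asyncio
from time import time

async def run_session_coroutine(session, frames):
    costs = []
    for frame in frames:
        since = time()
        # Offload the blocking onnxruntime call to a worker thread so the
        # event loop can schedule the other models' coroutines meanwhile.
        _ = await asyncio.to_thread(session.run, ['output'], {'input': frame})
        costs.append(time() - since)
    print(sum(costs))
```

Whether this ends up faster than plain threads here is untested; the point is only that it removes the serial-like behaviour described above.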
<div class="col-md-12 mt-5">
<p>上一个:<a href="/news/article-77682.htm">扬州宠物医院营业时间表(扬州宠物医院营业时间表电话)</a></p>
<p>下一个:<a href="/news/article-78379.htm">中国兽药十大名牌厂家排名百度百科图片(国内排名前十的兽药厂家)</a></p>
</div>
</div>
<div class="col-md-3">
<div class="panel panel-default">
<div class="panel-heading">
<h3 class="panel-title">热门文章</h3>
</div>
<div class="panel-body">
<ul class="p-0 x-0" style="list-style: none;margin: 0;padding: 0;">
<li class="py-2"><a href="/news/article-74980.htm" title="凭什么猫不能打新猫(凭什么猫不能打新猫呢)">凭什么猫不能打新猫(凭什么猫不能打新猫呢)</a></li>
<li class="py-2"><a href="/free-nodes/2025-3-22-node-share-links.htm" title="VPN浏览器 | 3月22日18.5M/S|免费Clash/V2ray/SSR/Shadowrocket订阅节点地址">VPN浏览器 | 3月22日18.5M/S|免费Clash/V2ray/SSR/Shadowrocket订阅节点地址</a></li>
<li class="py-2"><a href="/news/article-77682.htm" title="扬州宠物医院营业时间表(扬州宠物医院营业时间表电话)">扬州宠物医院营业时间表(扬州宠物医院营业时间表电话)</a></li>
<li class="py-2"><a href="/news/article-74605.htm" title="宠物粮食上市(宠物粮食上市公司名单)">宠物粮食上市(宠物粮食上市公司名单)</a></li>
<li class="py-2"><a href="/free-nodes/2025-3-15-free-node-subscribe.htm" title="VPN浏览器 | 3月15日18.7M/S|免费SSR/V2ray/Shadowrocket/Clash订阅节点地址">VPN浏览器 | 3月15日18.7M/S|免费SSR/V2ray/Shadowrocket/Clash订阅节点地址</a></li>
<li class="py-2"><a href="/news/article-76317.htm" title="动物疫苗的发展现状论文(动物疫苗市场分析)">动物疫苗的发展现状论文(动物疫苗市场分析)</a></li>
<li class="py-2"><a href="/news/article-79048.htm" title="天津免费领养猫的地方(天津免费领养猫的地方有哪些)">天津免费领养猫的地方(天津免费领养猫的地方有哪些)</a></li>
<li class="py-2"><a href="/news/article-79745.htm" title="在定义C++, C通用接口函数时让C++接口支持默认参数">在定义C++, C通用接口函数时让C++接口支持默认参数</a></li>
<li class="py-2"><a href="/news/article-74981.htm" title="养猫不可以养什么植物(养猫不能养哪几种花)">养猫不可以养什么植物(养猫不能养哪几种花)</a></li>
<li class="py-2"><a href="/free-nodes/2025-3-18-free-node-subscribe.htm" title="VPN浏览器 | 3月18日18.3M/S|免费V2ray/Clash/SSR/Shadowrocket订阅节点地址">VPN浏览器 | 3月18日18.3M/S|免费V2ray/Clash/SSR/Shadowrocket订阅节点地址</a></li>
</ul>
</div>
</div>
<div class="panel panel-default">
<div class="panel-heading">
<h3 class="panel-title">归纳</h3>
</div>
<div class="panel-body">
<ul class="p-0 x-0" style="list-style: none;margin: 0;padding: 0;">
<li class="py-2">
<h4><span class="badge" style="float: right;">28</span> <a href="/date/2025-03/" title="2025-03 归档">2025-03</a></h4>
</li>
</ul>
</div>
</div>
</div>
</div>
</div>
</section>
<style>
.video-heading {
max-width: 750px;
margin: 0 auto;
}
.video-heading h3 {
font-size: 60px;
opacity: 0.8;
line-height: 58px;
font-weight: 300;
text-transform: capitalize;
color: #fff;
}
.video-responsive {
padding-bottom: 600px;
position: relative;
width: 100%;
}
.canvas,
.video {
left: 0;
position: absolute;
top: 0;
background: #000;
z-index: 5;
overflow: hidden;
width: 100%;
height: 600px;
object-fit: cover;
}
#over_video {
position: absolute;
width: 100%;
height: 100%;
text-align: center;
top: 0;
z-index: 10;
color: #FFF;
}
.bg-mask {
background: rgba(0, 0, 0, 0.3);
height: inherit;
display: grid;
align-items: center;
padding: 0 15px;
}
@media screen and (max-width: 568px) {
.canvas,
.video {
height: 400px;
}
.video-responsive {
padding-bottom: 400px;
}
.video-heading h3 {
font-size: 35px;
line-height: 48px;
margin-bottom: 20px;
}
}
</style>
<script src="/assets/website/js/frontend/vmess_node/canvas-video-player.js"></script>
<script>
var isIOS = /iPad|iPhone|iPod/.test(navigator.platform);
if (isIOS) {
var canvasVideo = new CanvasVideoPlayer({
videoSelector: '.video',
canvasSelector: '.canvas',
timelineSelector: false,
autoplay: true,
makeLoop: true,
pauseOnClick: false,
audio: false
});
} else {
// Use HTML5 video
document.querySelectorAll('.canvas')[0].style.display = 'none';
}
</script>
<!-- scripts -->
<footer>
<!-- footer -->
<section class="w3l-footer">
<div class="w3l-footer-16-main py-5">
<div class="container">
<div class="d-flex below-section justify-content-between align-items-center pt-4 mt-5">
<div class="columns text-lg-left text-center">
<p>
<a href="/">首页</a> |
<a href="/free-node/">免费节点</a> |
<a href="/news/">新闻资讯</a> |
<a href="/about-us.htm">关于我们</a> |
<a href="/disclaimer.htm">免责申明</a> |
<a href="/privacy.htm">隐私申明</a> |
<a href="/sitemap.xml">网站地图</a>
</p>
<p>VPN浏览器 推荐免费VPN节点订阅中文网 版权所有 Powered by WordPress
</p>
</div>
<div class="columns-2 mt-lg-0 mt-3">
<ul class="social">
<li><a href="#facebook"><span class="fa fa-facebook" aria-hidden="true"></span></a>
</li>
<li><a href="#linkedin"><span class="fa fa-linkedin" aria-hidden="true"></span></a>
</li>
<li><a href="#twitter"><span class="fa fa-twitter" aria-hidden="true"></span></a>
</li>
<li><a href="#google"><span class="fa fa-google-plus" aria-hidden="true"></span></a>
</li>
<li><a href="#github"><span class="fa fa-github" aria-hidden="true"></span></a>
</li>
</ul>
</div>
</div>
</div>
</div>
<!-- move top -->
<button onclick="topFunction()" id="movetop" title="Go to top">
<span class="fa fa-angle-up"></span>
</button>
<script>
// When the user scrolls down 20px from the top of the document, show the button
window.onscroll = function() {
scrollFunction()
};
function scrollFunction() {
if (document.body.scrollTop > 20 || document.documentElement.scrollTop > 20) {
document.getElementById("movetop").style.display = "block";
} else {
document.getElementById("movetop").style.display = "none";
}
}
// When the user clicks on the button, scroll to the top of the document
function topFunction() {
document.body.scrollTop = 0;
document.documentElement.scrollTop = 0;
}
</script>
<!-- //move top -->
<script>
$(function() {
$('.navbar-toggler').click(function() {
$('body').toggleClass('noscroll');
})
});
</script>
</section>
<!-- //footer -->
</footer>
<!-- jQuery -->
<script src="/assets/website/js/frontend/vmess_node/jquery-3.5.1.min.js"></script>
<!-- Template JavaScript -->
<!-- stats number counter-->
<script src="/assets/website/js/frontend/vmess_node/jquery.waypoints.min.js"></script>
<script src="/assets/website/js/frontend/vmess_node/jquery.countup.js"></script>
<script>
$('.counter').countUp();
</script>
<!-- //stats number counter -->
<script src="/assets/website/js/frontend/vmess_node/jquery.magnific-popup.min.js"></script>
<script>
$(document).ready(function() {
$('.popup-with-zoom-anim').magnificPopup({
type: 'inline',
fixedContentPos: false,
fixedBgPos: true,
overflowY: 'auto',
closeBtnInside: true,
preloader: false,
midClick: true,
removalDelay: 300,
mainClass: 'my-mfp-zoom-in'
});
$('.popup-with-move-anim').magnificPopup({
type: 'inline',
fixedContentPos: false,
fixedBgPos: true,
overflowY: 'auto',
closeBtnInside: true,
preloader: false,
midClick: true,
removalDelay: 300,
mainClass: 'my-mfp-slide-bottom'
});
});
</script>
<script src="/assets/website/js/frontend/vmess_node/owl.carousel.js"></script>
<!-- testimonials for owlcarousel -->
<script>
$(document).ready(function() {
$('.owl-one').owlCarousel({
loop: true,
margin: 0,
nav: false,
responsiveClass: true,
autoplay: false,
autoplayTimeout: 5000,
autoplaySpeed: 1000,
autoplayHoverPause: false,
responsive: {
0: {
items: 1,
nav: false
},
480: {
items: 1,
nav: false
},
667: {
items: 1,
nav: false
},
1000: {
items: 1,
nav: false
}
}
})
})
</script>
<!-- //testimonials script for owlcarousel -->
<!-- for blog carousel slider -->
<script>
$(document).ready(function() {
$('.owl-two').owlCarousel({
stagePadding: 20,
margin: 15,
nav: false,
loop: false,
responsive: {
0: {
items: 1
},
600: {
items: 2
},
1000: {
items: 3
}
}
})
})
</script>
<!-- //for blog carousel slider -->
<script>
var lFollowX = 0,
lFollowY = 0,
x = 0,
y = 0,
friction = 1 / 30;
function animate() {
x += (lFollowX - x) * friction;
y += (lFollowY - y) * friction;
translate = 'translate(' + x + 'px, ' + y + 'px) scale(1.1)';
$('.banner-image').css({
'-webit-transform': translate,
'-moz-transform': translate,
'transform': translate
});
window.requestAnimationFrame(animate);
}
$(window).on('mousemove click', function(e) {
var lMouseX = Math.max(-100, Math.min(100, $(window).width() / 2 - e.clientX));
var lMouseY = Math.max(-100, Math.min(100, $(window).height() / 2 - e.clientY));
lFollowX = (20 * lMouseX) / 100; // 100 : 12 = lMouxeX : lFollow
lFollowY = (10 * lMouseY) / 100;
});
animate();
</script>
<!-- disable body scroll which navbar is in active -->
<script>
$(function() {
$('.navbar-toggler').click(function() {
$('body').toggleClass('noscroll');
})
});
</script>
<!--/MENU-JS-->
<script>
$(window).on("scroll", function() {
var scroll = $(window).scrollTop();
if (scroll >= 80) {
$("#site-header").addClass("nav-fixed");
} else {
$("#site-header").removeClass("nav-fixed");
}
});
//Main navigation Active Class Add Remove
$(".navbar-toggler").on("click", function() {
$("header").toggleClass("active");
});
$(document).on("ready", function() {
if ($(window).width() > 991) {
$("header").removeClass("active");
}
$(window).on("resize", function() {
if ($(window).width() > 991) {
$("header").removeClass("active");
}
});
});
</script>
<!--//MENU-JS-->
<!-- Bootstrap JS -->
<script src="/assets/website/js/frontend/vmess_node/bootstrap.min.js"></script>
<script src="https://www.freeclashnode.com/assets/js/frontend/invite-url.js"></script>
<script src="/assets/website/js/frontend/G.js"></script>
</body>
</html>