# from typing import Any, Coroutine
from uuid import UUID
from langchain.schema.agent import AgentAction, AgentFinish
import openai
import os
# from langchain.vectorstores import Chroma
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.text_splitter import CharacterTextSplitter
from langchain.chat_models import AzureChatOpenAI
from langchain.document_loaders import DirectoryLoader
from langchain.chains import RetrievalQA  
# from langchain.vectorstores import Pinecone
from langchain.vectorstores.pinecone import Pinecone
from langchain.agents import initialize_agent
from langchain.agents import AgentType
from langchain.agents import Tool
# from langchain.agents import load_tools
from langchain.tools import BaseTool
from langchain.tools import DuckDuckGoSearchRun
from langchain.utilities import WikipediaAPIWrapper
from langchain.python import PythonREPL
from langchain.chains import LLMMathChain
from langchain.memory import ConversationBufferMemory
from langchain.memory import ConversationBufferWindowMemory
from langchain.agents import ZeroShotAgent, AgentExecutor
from langchain.agents import OpenAIMultiFunctionsAgent
from langchain.prompts import MessagesPlaceholder
from langchain.chains.summarize import load_summarize_chain
from langchain.schema.messages import (
    AIMessage,
    BaseMessage,
    FunctionMessage,
    SystemMessage,
)
# from langchain import LLMChain
from langchain.chains import LLMChain
import azure.cognitiveservices.speech as speechsdk
import requests

import sys
import pinecone      
from pinecone.core.client.configuration import Configuration as OpenApiConfiguration
import gradio as gr
import time

import glob
from typing import Any, Dict, List, Mapping, Optional
from multiprocessing import Pool
from tqdm import tqdm
from pygame import mixer

from langchain.document_loaders import (
    CSVLoader,
    EverNoteLoader,
    PyMuPDFLoader,
    TextLoader,
    UnstructuredEmailLoader,
    UnstructuredEPubLoader,
    UnstructuredHTMLLoader,
    UnstructuredMarkdownLoader,
    UnstructuredODTLoader,
    UnstructuredPowerPointLoader,
    UnstructuredWordDocumentLoader,
    UnstructuredExcelLoader
)
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.text_splitter import TokenTextSplitter
from langchain.docstore.document import Document
import langchain
import asyncio
from playwright.async_api import async_playwright

from langchain.embeddings.huggingface import HuggingFaceEmbeddings

from langchain.llms.base import LLM
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.schema import (
    Generation,
    LLMResult
    )
import time
from datasets import load_dataset

from transformers import pipeline

import soundfile as sf
from scipy.io import wavfile

import re

from transformers import SpeechT5Processor, SpeechT5ForTextToSpeech, SpeechT5HifiGan
import torch
from codeinterpreterapi import CodeInterpreterSession
import html2text

from interpreter.code_interpreter import CodeInterpreter
# from interpreter.code_block import CodeBlock

import regex

from langchain.callbacks.base import BaseCallbackHandler
from collections.abc import Generator
from queue import Queue, Empty
from threading import Thread
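
# Streaming pattern: QueueCallback pushes every new LLM token (and agent
# events) onto a thread-safe queue, while stream() below drains that queue
# from the main thread and yields incremental output to the UI.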


class QueueCallback(BaseCallbackHandler):
    """Callback handler for streaming LLM responses to a queue."""

    def __init__(self, q):
        self.q = q

    def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
        self.q.put(token)

    def on_agent_finish(self, finish: AgentFinish, *, run_id: UUID, parent_run_id: UUID | None = None, **kwargs: Any) -> Any:
        # The base handler returns None, so this pushes a None sentinel when
        # the agent finishes; stream() maps None to an empty string.
        self.q.put(super().on_agent_finish(finish, run_id=run_id, parent_run_id=parent_run_id, **kwargs))

    def on_agent_action(self, action: AgentAction, *, run_id: UUID, parent_run_id: UUID | None = None, **kwargs: Any) -> Any:
        self.q.put(super().on_agent_action(action, run_id=run_id, parent_run_id=parent_run_id, **kwargs))

    def on_llm_end(self, *args, **kwargs: Any) -> None:
        return self.q.empty()

def stream(input_text) -> Generator:
    # Create a Queue
    q = Queue()
    job_done = object()

    # Create a function to call - this will run in a thread

    def task():
        resp = agent.run(input_text, callbacks=[QueueCallback(q)])
        q.put(job_done)

    # Create a thread and start the function
    t = Thread(target=task)
    t.start()

    content = ""

    # Get each new token from the queue and yield for our generator
    counter = 0
    while True:
        try:
            next_token = q.get(True, timeout=60)
            print("next_token: ", str(next_token))
            if next_token is None:
                next_token = ""
            # counter = counter + 1
            # print("No data, retry number: ", counter)
            if counter > 3:  # retry guard (inactive while the increment above stays commented out)
                break
            if next_token is job_done:
                break
            content += str(next_token)
            yield next_token, content
        except Empty:
            continue
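
# A minimal usage sketch (illustrative only; `agent` is the executor that is
# initialized later in this file, and the prompt is a placeholder):
#
#   for token, partial in stream("Summarize the uploaded report"):
#       print(partial)  # the incrementally growing answer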


global CurrentAgent
CurrentAgent = os.environ["agent_type"]

global ChatbotHistory
ChatbotHistory = []
# timestr = time.strftime("%Y-%m-%d-%H:%M:%S")
#     # Running_history = Running_history + [(None, 'Timestamp: '+timestr)]
#     # yield Running_history
# WelcomeStr = """
#     This is AI Assistant powered by MECH Core Team.
#     It is connected remotely with GPT4. The following function is available for you.
#     1. Free Chat with AI assistant
#     2. Search Information and Engineering Data: Vector Database + Internet
#     3. Make specific task with tools:
#         - Text to Sound | Sound to Text | Doc summary
#         - Code interpret (Beta version)
#         - Text to Image (forecast)
#     """
#     # Running_history = Running_history + [(None, timestr+'\n'+WelcomeStr)]
# ChatbotHistory = ChatbotHistory + [(None, timestr+'\n'+WelcomeStr)]

class CodeBlock:
    '''
    A minimal code-block container that Code_Runner below can execute.
    '''
    def __init__(self, code):
        self.code = code
        self.output = ""
        self.active_line = None

    def refresh(self):
        print(f"Active line: {self.active_line}")
        print(f"Output: {self.output}")


code_1 = """
for i in range(3):
  print("hello world")
"""
code_2 = """
!pip install python-pptx
"""

def Code_Runner(code_raw: str):
    # interpreter = CodeInterpreter(language="python", debug_mode=True)
    global CurrentAgent
    if CurrentAgent == "Zero Short React 2":
        code_raw = RemoveIndent(code_raw)
    if '!pip' in code_raw or 'pip install' in code_raw:
        try:
            code_raw=code_raw.replace('!pip', 'pip')
        except Exception as e:
            print(e)
        interpreter = CodeInterpreter(language="shell", debug_mode=True)
    else:
        interpreter = CodeInterpreter(language="python", debug_mode=True)
    # interpreter = CodeInterpreter(language=lang, debug_mode=True)
    code_block = CodeBlock(code_raw)
    interpreter.active_block = code_block
    output = interpreter.run()
    print("Real Output: \n", output)
    try:
        if output.strip() =="" or output == []:
            output = "It is Done. No Error Found."
    except Exception as e:
        print(e)
    return output

def RemoveIndent(code_string, indentation_level=4):
    lines = code_string.split('\n')
    corrected_lines = []
    for line in lines:
        if line.strip() == "":
            continue
        line_without_indentation = line[indentation_level:] \
            if line.startswith(' ' * indentation_level) else line
        corrected_lines.append(line_without_indentation)
    corrected_content = '\n'.join(corrected_lines)
    return corrected_content
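
# Example: RemoveIndent("    for i in range(3):\n        print(i)") strips one
# 4-space indentation level from every line, turning an indented snippet into
# runnable top-level code (blank lines are dropped).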


Code_Runner(code_1)
Code_Runner(code_2)
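# The two calls above are import-time smoke tests: code_1 exercises the Python
# path, code_2 the shell/pip-install path.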

async def TestCodeInterpret(CustomMessage:str):
    # create a session
    session = CodeInterpreterSession(llm=GPTfake)
    session.start()

    # generate a response based on user input
    response = await session.generate_response(CustomMessage)

    # output the response (text + image)
    print("AI: ", response.content)
    for file in response.files:
        file.show_image()

    # terminate the session
    session.stop()
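
# TestCodeInterpret is a coroutine; to try it, run something like
#   asyncio.run(TestCodeInterpret("plot y = x**2"))
# (illustrative prompt only).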


ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
sample = ds[0]["audio"]

global Audio_output
Audio_output = []

def speech_to_text_loc(audio):
    device = "cpu"
    pipe = pipeline(
        "automatic-speech-recognition",
        model="openai/whisper-small",
        chunk_length_s=30,
        device=device,
    )
    print("type of audio:", type(audio))
    if isinstance(audio, dict):
        text = pipe(audio.copy(), batch_size=2)["text"]
    else:
        text = pipe(audio, batch_size=2)["text"]

    return text

print("voice to text loc: ", speech_to_text_loc(sample))

def text_to_speech_loc(text):
    device = "cpu"
    pipe = pipeline(
        "text-to-speech",
        model="microsoft/speecht5_tts",
        device=device,
    )
    output = pipe(text)
    speech = output["audio"]
    sampling_rate = output["sampling_rate"]
    print("Type of speech: ", type(speech))
    print("sampling_rate: ", sampling_rate)

    timestr = time.strftime("%Y%m%d-%H%M%S")
    # sampling_rate = 16000
    with open('sample-' + timestr + '.wav', 'wb') as audio:
        wavfile.write(audio, sampling_rate, speech)
    # audio = sf.write("convert1.wav", speech, samplerate=16000)
    print("audio: ", audio)
    return audio

def text_to_speech_loc2(Text_input):
    global Audio_output
    processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
    model = SpeechT5ForTextToSpeech.from_pretrained("microsoft/speecht5_tts")
    vocoder = SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan")

    inputs = processor(text = Text_input, return_tensors="pt")

    # load xvector containing speaker's voice characteristics from a dataset
    embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
    speaker_embeddings = torch.tensor(embeddings_dataset[7306]["xvector"]).unsqueeze(0)

    speech = model.generate_speech(inputs["input_ids"], speaker_embeddings, vocoder=vocoder)
    print("Type of speech: ", type(speech))

    timestr = time.strftime("%Y%m%d-%H%M%S")
    # sampling_rate = 16000
    with open('sample-' + timestr + '.wav', 'wb') as audio:
        sf.write(audio, speech.numpy(), samplerate=16000)
    # audio = sf.write("convert1.wav", speech, samplerate=16000)
    print("audio: ", audio)
    Audio_output.append(audio.name)
    return audio


print("text to speech2: ", text_to_speech_loc2("Good morning."))

class GPTRemote(LLM):
    n: int

    @property
    def _llm_type(self) -> str:
        return "custom"
    
    def _call(
            self, 
            prompt: str, 
            stop: Optional[List[str]] = None,
            run_manager: Optional[CallbackManagerForLLMRun] = None, 
            **kwargs: Any
            ) -> str:
        print("prompt:", prompt)
        
        output = asyncio.run(start_playwright(prompt))
            # output = "test custom llm"
        # print(type(output))
        if output is None:
            output = "No Feedback"
        print("-" * 20)
        print('Raw: \n', output)
        keywords = ['Action:', 'Action Input:', 'Observation:', 'Thought:', 'Final Answer:']
        # print("Judge 1: ", 'Action:' in output)
        # print("Judge 2: ", 'Action Input:' in output)
        # print("Judge 3: ", 'Observation:' in output)
        # print("Judge 4: ", 'Thought:' in output)
        # print("Judge Final Answer: ",'Final Answer:' in output)

        # for item in keywords:
        #     if item in output:
        #         output = output.replace(item, '\n'+item)
        #     if '|' in output:
        #         output = output.replace('|', '')

        # if 'Thought:' not in output:
        #     output = 'Thought:'+ output

        # if 'Action Input:' in output and 'Observation:' in output:
        if 'Action:' in output and 'Observation:' in output:
            output = output.split('Observation:')[0]

        global CurrentAgent
        # if Choice == "Structured Zero Short Agent":
        if CurrentAgent == 'Structured Zero Short Agent':
            try:
                # temp = output.split('{')[1].split('}')[0:-2]

                if output.strip()[-1] == '}' and 'Action:' in output:
                    print("valid command")
                elif 'Action:' in output:
                    output = output + '}'
                    print("corrected command")

                pattern = r'\{((?:[^{}]|(?R))*)\}'
                temp = regex.search(pattern, output)
                rrr = temp.group()
                output = output.replace(rrr, '```'+ '\n' + rrr + '\n'+'```')

                # print("Found command: ", output)
            except Exception as e:
                print("model internal error:", e)
        print("-" * 20)
        print("Treated output: \n", output)
        return output
    
    @property
    def _identifying_params(self) -> Mapping[str, Any]:
        return [("n", self.n)]
    
    @staticmethod
    def treat_output(text):
        keywords = ['Action:', 'Action Input:', 'Observation:', 'Thought:', 'Final Answer:']
        for item in keywords:
            if item in text:
                # str.replace returns a new string, so the result must be reassigned.
                text = text.replace(item, '\n' + item)
        print("treat output: ", text)
        return text
    # def _generate(
    #     self,
    #     prompts: List[str],
    #     stop: Optional[List[str]] = None,
    #     run_manager: Optional[CallbackManagerForLLMRun] = None,
    #     **kwargs: Any,
    # ) -> LLMResult:
    #     result = LLMResult()
    #     result.generations = [Generation("test result")]
    #     return result
    #     """Run the LLM on the given prompts."""    

GPTfake = GPTRemote(n=0)
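
# GPTfake is a LangChain-compatible LLM wrapper: each _call drives a headless
# Playwright browser against the web endpoint in Endpoint_GPT4 instead of
# calling a model API directly.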


async def start_playwright(question: str):
    start_t = time.time()
    pw = await async_playwright().start()
    browser = await pw.chromium.launch(headless=True)
    end_t = time.time()
    print("Init Browser Done:",  end_t - start_t)
    start_t = end_t
    # browser = await pw.webkit.launch(headless=True)
    page = await browser.new_page()
    
    # note all methods are async (use the "await" keyword)
    await page.goto(os.environ["Endpoint_GPT4"])
    # print("Title of Web: ", await page.title())
    end_t = time.time()
    print("New Page Done:",  end_t - start_t)
    start_t = end_t
    await page.wait_for_timeout(200)
#     print("Content of Web: ", await page.content())
#     print("Test content: ", await page.locator("//div[@class='css-zt5igj e1nzilvr3']").inner_html())
    # print("Test content: ", await page.locator("//div[@class='css-zt5igj e1nzilvr3']").inner_text())
    
    await page.locator("//textarea").fill(question)
    await page.wait_for_timeout(200)
#     print("Content of Web: ", await page.content())
#     await page.locator("//button[@class='css-1wi2cd3 e1d2x3se3']").click()
    await page.locator("//textarea").press("Enter")
    await page.wait_for_timeout(200)
#     print("Content of Web: ", await page.content())
#     print("output_text 1", await page.locator("//div[@aria-label='Chat message from assistant']").last.inner_text())
#     output_text = await page.locator("//div[@aria-label='Chat message from assistant']").last.inner_text()
#     print("output_text 1", output_text)
    output_history = "NOTHING"
    for i in range(100):
        output_text_old = await page.locator("//div[@aria-label='Chat message from assistant']").last.inner_text()
        html_content = await page.locator("//div[@aria-label='Chat message from assistant']//div[@class='stMarkdown']").last.inner_html()
        markdown_converter = html2text.HTML2Text()
        output_text = markdown_converter.handle(html_content)
        print("output_text... :")
        
        if output_text == output_history and '▌' not in output_text and output_text != "":
            end_t = time.time()
            print("Output Done:",  end_t - start_t)
            # Close the browser before returning; otherwise every call would
            # leak a headless Chromium instance.
            await browser.close()
            return output_text
        else:
            await page.wait_for_timeout(500)
            output_history = output_text
    print("-------- Final Answer -----------\n", output_text)

    await browser.close()



# import playsound

langchain.debug = True

global memory3
memory3 = ConversationBufferWindowMemory(memory_key="chat_history", input_key="input", output_key='output', return_messages=True)

global memory2
memory2 = ConversationBufferWindowMemory(memory_key="chat_history")
global memory_openai
memory_openai = ConversationBufferWindowMemory(memory_key="memory", return_messages=True)
global last_request
last_request = ""

# Custom document loaders
class MyElmLoader(UnstructuredEmailLoader):
    """Wrapper to fallback to text/plain when default does not work"""

    def load(self) -> List[Document]:
        """Wrapper adding fallback for elm without html"""
        try:
            try:
                doc = UnstructuredEmailLoader.load(self)
            except ValueError as e:
                if 'text/html content not found in email' in str(e):
                    # Try plain text
                    self.unstructured_kwargs["content_source"]="text/plain"
                    doc = UnstructuredEmailLoader.load(self)
                else:
                    raise
        except Exception as e:
            # Add file_path to exception message
            raise type(e)(f"{self.file_path}: {e}") from e

        return doc
    
LOADER_MAPPING = {
    ".csv": (CSVLoader, {}),
    # ".docx": (Docx2txtLoader, {}),
    ".doc": (UnstructuredWordDocumentLoader, {}),
    ".docx": (UnstructuredWordDocumentLoader, {}),
    ".enex": (EverNoteLoader, {}),
    ".eml": (MyElmLoader, {}),
    ".epub": (UnstructuredEPubLoader, {}),
    ".html": (UnstructuredHTMLLoader, {}),
    ".md": (UnstructuredMarkdownLoader, {}),
    ".odt": (UnstructuredODTLoader, {}),
    ".pdf": (PyMuPDFLoader, {}),
    ".ppt": (UnstructuredPowerPointLoader, {}),
    ".pptx": (UnstructuredPowerPointLoader, {}),
    ".txt": (TextLoader, {"encoding": "utf8"}),
    ".xls": (UnstructuredExcelLoader, {}),
    ".xlsx": (UnstructuredExcelLoader, {"mode":"elements"}),
    # Add more mappings for other file extensions and loaders as needed
}

source_directory = 'Upload Files'
global file_list_loaded
file_list_loaded = []
chunk_size = 500
chunk_overlap = 300

global file_list_by_user
file_list_by_user = []

global Filename_Chatbot
Filename_Chatbot = ""

def load_single_document(file_path: str) -> List[Document]:
    ext = "." + file_path.rsplit(".", 1)[-1]
    if ext in LOADER_MAPPING:
        loader_class, loader_args = LOADER_MAPPING[ext]
        loader = loader_class(file_path, **loader_args)
        return loader.load()

    raise ValueError(f"Unsupported file extension '{ext}'")


def load_documents(source_dir: str, ignored_files: List[str] = []) -> List[Document]:
    """
    Loads all documents from the source documents directory, ignoring specified files
    """
    all_files = []
    for ext in LOADER_MAPPING:
        all_files.extend(
            glob.glob(os.path.join(source_dir, f"**/*{ext}"), recursive=True)
        )
    filtered_files = [file_path for file_path in all_files if file_path not in ignored_files]

    with Pool(processes=os.cpu_count()) as pool:
        results = []
        with tqdm(total=len(filtered_files), desc='Loading new documents', ncols=80) as pbar:
            for i, docs in enumerate(pool.imap_unordered(load_single_document, filtered_files)):
                results.extend(docs)
                pbar.update()

    return results

def load_documents_2(all_files: List[str] = [], ignored_files: List[str] = []) -> List[Document]:
    """
    Loads all documents from the source documents directory, ignoring specified files
    """
    # all_files = []
    # for ext in LOADER_MAPPING:
    #     all_files.extend(
    #         glob.glob(os.path.join(source_dir, f"**/*{ext}"), recursive=True)
    #     )
    filtered_files = [file_path for file_path in all_files if file_path not in ignored_files]


    results = []
    with tqdm(total=len(filtered_files), desc='Loading new documents', ncols=80) as pbar:
        for file in filtered_files:
            docs = load_single_document(file)
            results.extend(docs)
            pbar.update()

    return results
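
# Unlike load_documents (a multiprocessing Pool over a directory scan),
# load_documents_2 takes an explicit file list and loads it sequentially,
# which is simpler for the small batches passed in via file_list_loaded.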


def process_documents(ignored_files: List[str] = []) -> List[Document]:
    """
    Load documents and split in chunks
    """
    print(f"Loading documents from {source_directory}")
    documents = load_documents(source_directory, ignored_files)
    if not documents:
        print("No new documents to load")
        exit(0)
    print(f"Loaded {len(documents)} new documents from {source_directory}")
    # text_splitter = RecursiveCharacterTextSplitter(chunk_size=chunk_size, chunk_overlap=chunk_overlap)
    text_splitter = TokenTextSplitter(chunk_size=chunk_size, chunk_overlap=chunk_overlap)
    texts = text_splitter.split_documents(documents)
    print(f"Split into {len(texts)} chunks of text (max. {chunk_size} tokens each)")
    return texts

def process_documents_2(ignored_files: List[str] = []) -> List[Document]:
    """
    Load documents and split in chunks
    """
    global file_list_loaded
    print(f"Loading documents from {source_directory}")
    print("File Path to start processing:", file_list_loaded)
    documents = load_documents_2(file_list_loaded, ignored_files)
    if not documents:
        print("No new documents to load")
        exit(0)
    print(f"Loaded {len(documents)} new documents from {source_directory}")
    # text_splitter = RecursiveCharacterTextSplitter(chunk_size=chunk_size, chunk_overlap=chunk_overlap)
    text_splitter = TokenTextSplitter(chunk_size=chunk_size, chunk_overlap=chunk_overlap)
    texts = text_splitter.split_documents(documents)
    print(f"Split into {len(texts)} chunks of text (max. {chunk_size} tokens each)")
    return texts

def process_documents_3(ignored_files: List[str] = []) -> List[Document]:
    """
    Load documents and split in chunks
    """
    global file_list_loaded
    print(f"Loading documents from {source_directory}")
    print("File Path to start processing:", file_list_loaded)
    documents = load_documents_2(file_list_loaded, ignored_files)
    if not documents:
        print("No new documents to load")
        exit(0)
    print(f"Loaded {len(documents)} new documents from {source_directory}")
    # text_splitter = RecursiveCharacterTextSplitter(chunk_size=8000, chunk_overlap=1000)
    text_splitter = TokenTextSplitter(chunk_size=4000, chunk_overlap=500)
    texts = text_splitter.split_documents(documents)
    print(f"Split into {len(texts)} chunks of text (max. {chunk_size} tokens each)")
    return texts
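
# The three process_documents variants differ only in input source and
# chunking: process_documents scans source_directory, process_documents_2
# splits the uploaded file list into 500-token chunks for retrieval, and
# process_documents_3 uses large 4000-token chunks suited to summarization.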

def UpdateDb():
    global vectordb_p
    global index_name
    # pinecone.Index(index_name).delete(delete_all=True, namespace='')
    # collection = vectordb_p.get()
    # split_docs = process_documents([metadata['source'] for metadata in collection['metadatas']])
    # split_docs = process_documents()
    split_docs = process_documents_2()
    tt = len(split_docs)
    print(split_docs[tt-1])
    print(f"Creating embeddings. May take some minutes...")
    vectordb_p = Pinecone.from_documents(split_docs, embeddings, index_name = index_name)
    print("Pinecone Updated Done")
    print(index.describe_index_stats())

ListAgentWithRemoteGPT = ['Zero Short React 2','Zero Short Agent 2',
                            'OpenAI Multi 2', 'Conversation Agent',
                            'Code Interpreter', 'Structured Zero Short Agent']

def SummarizeDoc():
    global vectordb_p
    global Choice
    global CurrentAgent
    # pinecone.Index(index_name).delete(delete_all=True, namespace='')
    # collection = vectordb_p.get()
    # split_docs = process_documents([metadata['source'] for metadata in collection['metadatas']])
    # split_docs = process_documents()
    split_docs = process_documents_3()
    tt = len(split_docs)
    print(split_docs[tt-1])
    sum_text=""
    try:
        if CurrentAgent in ListAgentWithRemoteGPT:
            sum_chain = load_summarize_chain(GPTfake, chain_type='refine', verbose=True)
        else:
            sum_chain = load_summarize_chain(llm, chain_type='refine', verbose=True)
        sum_text = sum_chain.run(split_docs)
        return sum_text
    except Exception as e:
        print("SummarizeDoc error:", e)
        
    # sum_text = "test sum"
    



class DB_Search(BaseTool):
    name = "Vector_Database_Search"
    description = "This is the internal vector database to search information firstly. If information is found, it is trustful."
    def _run(self, query: str) -> str:
        response, source = QAQuery_p(query)
        # response = "test db_search feedback"
        return response

    def _arun(self, query: str):
        raise NotImplementedError("N/A")

class DB_Search2(BaseTool):
    name = "Vector Database Search"
    description = "This is the internal vector database to search information firstly (i.e. engineering data, acronym.)"
    def _run(self, query: str) -> str:
        response, source = QAQuery_p(query)
        # response = "test db_search feedback"
        return response

    def _arun(self, query: str):
        raise NotImplementedError("N/A")


def Text2Sound(text):

    speech_config = speechsdk.SpeechConfig(subscription=os.environ['SPEECH_KEY'], region=os.environ['SPEECH_REGION'])
    audio_config = speechsdk.audio.AudioOutputConfig(use_default_speaker=True)
    speech_config.speech_synthesis_voice_name='en-US-JennyNeural'
    # speech_synthesizer = ""
    speech_synthesizer = speechsdk.SpeechSynthesizer(speech_config=speech_config, audio_config=audio_config)
    speech_synthesis_result = speech_synthesizer.speak_text_async(text).get()
    # if speech_synthesis_result.reason == speechsdk.ResultReason.SynthesizingAudioCompleted:
    #     print("Speech synthesized for text [{}]".format(text))
    # elif speech_synthesis_result.reason == speechsdk.ResultReason.Canceled:
    #     cancellation_details = speech_synthesis_result.cancellation_details
    #     print("Speech synthesis canceled: {}".format(cancellation_details.reason))
    #     if cancellation_details.reason == speechsdk.CancellationReason.Error:
    #         if cancellation_details.error_details:
    #             print("Error details: {}".format(cancellation_details.error_details))
    #             print("Did you set the speech resource key and region values?")
    print("test")
    return speech_synthesis_result
    pass



def get_azure_access_token():
    azure_key = os.environ.get("SPEECH_KEY")
    try:
        response = requests.post(
            "https://eastus.api.cognitive.microsoft.com/sts/v1.0/issuetoken",
            headers={
                "Ocp-Apim-Subscription-Key": azure_key
            }
        )
        response.raise_for_status()
    except requests.exceptions.RequestException as e:
        print(f"Error: {e}")
        return None
    # print (response.text)
    return response.text


def text_to_speech_2(text):
    global Audio_output
    access_token = get_azure_access_token()
    voice_name='en-US-AriaNeural'
    if not access_token:
        return None

    try:
        response = requests.post(
            "https://eastus.tts.speech.microsoft.com/cognitiveservices/v1",
            headers={
                "Authorization": f"Bearer {access_token}",
                "Content-Type": "application/ssml+xml",
                "X-MICROSOFT-OutputFormat": "riff-24khz-16bit-mono-pcm",
                "User-Agent": "TextToSpeechApp",
            },
            data=f"""
                <speak version='1.0' xml:lang='en-US'>
                <voice name='{voice_name}'>
                    {text}
                </voice>
                </speak>
            """,
        )
        response.raise_for_status()
        timestr = time.strftime("%Y%m%d-%H%M%S")
        with open('sample-' + timestr + '.wav', 'wb') as audio:
            audio.write(response.content)
        print ("File Name  ", audio.name)
        # print (audio)
        Audio_output.append(audio.name)
        # return audio.name
        return audio
    except requests.exceptions.RequestException as e:
        print(f"Error: {e}")
        return None

def speech_to_text(Filename_Audio_input_single):
    print("Start speech to text ....")
    access_token = get_azure_access_token()
    
    if not access_token:
        return None

    try:
        endpoint = f"https://eastus.stt.speech.microsoft.com/speech/recognition/conversation/cognitiveservices/v1?language=en-US"
        headers={
            "Authorization": f"Bearer {access_token}",
            "Content-Type": "audio/wav",}

        response = requests.post(endpoint, headers=headers, data=open(Filename_Audio_input_single, "rb"))
        print("Speech to Text Raw: ", response.text)
        text_from_audio = response.text.split('DisplayText":"')[1].split('"}')[0]
        # text_from_audio = response.text('DisplayText')
        print("Speech to Text: ", text_from_audio)
        return text_from_audio
    except requests.exceptions.RequestException as e:
        print(f"Error speech_to_text: {e}")
        return None


Text2Sound_tool = Tool(
    name = "Text_To_Sound_REST_API",
    # func = Text2Sound,
    func = text_to_speech_2,
    description = "Useful when you need to convert text into sound file."
)

Text2Sound_tool2 = Tool(
    name = "Text To Sound REST API",
    # func = Text2Sound,
    func = text_to_speech_2,
    description = "Useful when you need to convert text into sound file."
)

Text2Sound_tool_loc = Tool(
    name = "Text To Sound API 2",
    # func = Text2Sound,
    func = text_to_speech_loc2,
    description = "Useful when you need to convert text into sound file."
)

Wikipedia = WikipediaAPIWrapper()
Netsearch = DuckDuckGoSearchRun()
Python_REPL = PythonREPL()

wikipedia_tool = Tool(
    name = "Wikipedia_Search",
    func = Wikipedia.run,
    description = "Useful to search a topic, country or person when there is no availble information in vector database"
)

duckduckgo_tool = Tool(
    name = "Duckduckgo_Internet_Search",
    func = Netsearch.run,
    description = "Useful to search information in internet when it is not available in other tools"    
)

python_tool = Tool(
    name = "Python_REPL",
    func = Python_REPL.run,
    description = "Useful when you need python script to answer questions. You should input python code."    
)

wikipedia_tool2 = Tool(
    name = "Wikipedia Search",
    func = Wikipedia.run,
    description = "Useful to search a topic, country or person when there is no availble information in vector database"
)

duckduckgo_tool2 = Tool(
    name = "Duckduckgo Internet Search",
    func = Netsearch.run,
    description = "Useful to search in internet for real-time information and additional information which is not available in other tools"    
)

python_tool2 = Tool(
    name = "Python REPL",
    func = Python_REPL.run,
    description = "Useful when you need python script to answer questions. You should input python code."    
)

python_tool3 = Tool(
    name = "Code Runner",
    func = Code_Runner,
    description = """Code Interpreter which is able to run code block in local machine.\n It is capable to treat **any** task by running the code and output the result. (i.e. analyzer data, modify/creat documents, draw diagram/flowchart ...)\n You should input detail code with right indentation."""
)


# tools = [DB_Search(), wikipedia_tool, duckduckgo_tool, python_tool]


os.environ["OPENAI_API_TYPE"] = "azure"
os.environ["OPENAI_API_KEY"] = os.environ["OPENAI_API_KEY"]
os.environ["OPENAI_API_BASE"] = os.environ["OPENAI_API_BASE"]
os.environ["OPENAI_API_VERSION"] = os.environ["OPENAI_API_VERSION"]
# os.environ["OPENAI_API_VERSION"] = "2023-05-15"
username = os.environ["username1"]
password = os.environ["password"]
SysLock = os.environ["SysLock"] # 0=unlock 1=lock

# deployment_name="Chattester"

chat = AzureChatOpenAI(
    deployment_name=os.environ["deployment_name"],
    temperature=0,
)


llm = chat

# llm = GPTfake

llm_math = LLMMathChain.from_llm(llm)

llm_math_2 = LLMMathChain.from_llm(GPTfake)

math_tool = Tool(
    name ='Calculator',
    func = llm_math.run,
    description ='Useful for when you need to answer questions about math.'
)

math_tool_2 = Tool(
    name ='Calculator',
    func = llm_math_2.run,
    description ='Useful for when you need to answer questions about math.'
)

# openai
tools = [DB_Search(), duckduckgo_tool, python_tool, math_tool, Text2Sound_tool]

tools2 = [DB_Search2(), duckduckgo_tool2, wikipedia_tool2, python_tool2, math_tool, Text2Sound_tool2]

tools_remote = [DB_Search2(), duckduckgo_tool2, wikipedia_tool2, python_tool3, math_tool_2, Text2Sound_tool_loc]

# tools = load_tools(["Vector Database Search","Wikipedia Search","Python REPL","llm-math"], llm=llm)

# Openai embedding
embeddings_openai = OpenAIEmbeddings(deployment="model_embedding", chunk_size=15)

# huggingface embedding model
embed_model_id = 'sentence-transformers/all-MiniLM-L6-v2'

# device = f'cuda:{cuda.current_device()}' if cuda.is_available() else 'cpu'
device = 'cpu'
embeddings_miniLM = HuggingFaceEmbeddings(
    model_name=embed_model_id,
    model_kwargs={'device': device},
)
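
# Note: all-MiniLM-L6-v2 produces 384-dimensional embeddings, so the Pinecone
# index must have been created with a matching dimension.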

# embeddings = embeddings_openai
embeddings = embeddings_miniLM


# embeddings = OpenAIEmbeddings(deployment="model_embedding_2", chunk_size=15)

pinecone.init(      
	api_key = os.environ["pinecone_api_key"],      
	# environment='asia-southeast1-gcp-free',
    environment='us-west4-gcp-free',
    # openapi_config=openapi_config      
)
# index_name = 'stla-baby'
global index_name
index_name = 'stla-back' 
index = pinecone.Index(index_name)
# index.delete(delete_all=True, namespace='')
print(pinecone.whoami())
print(index.describe_index_stats())

"""
Answer the following questions as best you can with details. 
You can always use tools to convert text to sound.
You must always check internal vector database first and try to answer the question based on the information in internal vector database only.
Only when there is no information available from vector database, you can search information by using other tools.
You have access to the following tools:

Vector Database Search: This is the internal database to search information firstly. If information is found, it is trustful.
Duckduckgo Internet Search: Useful to search information in internet when it is not available in other tools.
Wikipedia Search: Useful to search a topic, country or person when there is no available information in vector database
Python REPL: Useful when you need python to answer questions. You should input python code.
Calculator: Useful for when you need to answer questions about math.
Text To Sound: Useful when you need to convert text into sound file."""


PREFIX = """Answer the following questions as best you can with detail information and explanation. 
You can always use tools to convert text to sound.
You must always check vector database first and try to answer the question based on the information in vector database only.
Only when there is no information available from vector database, you can search information by using other tools.
When the final answer has output files, you must output the **name** of the file.
You have access to the following tools:"""

PREFIX_2 = """You are a helpful AI assistant. You are capable to handle **any** task. Your mission is to answer the following request as best as you can with detail information and explanation. When you need information, you must always check vector database first and try to answer the question based on the information found in vector database only. Only when there is no information available from vector database, you can search information by using other tools. When the final answer has output files, you must output the **name** of the file.\n
---\n You have access to the following tools:\n"""


PREFIX_3 ="""
You are a helpful AI assistant. Your mission is to answer the following user request as best as you can with detail information and explanation. 
If you are not clear about the request, you can ask user for more details and the confirmation. You can provide additional suggestion to user on the request and ask confirmation from user. 
When you are clear about the request, you can start to answer the request by **writing a plan** firstly. In general, try to **make plans** with as few steps as possible. 
When you need information, you can use tools as below and merge all gathered information from different tools.
When you need to use "Code Runner" for code running, **Always recap the plan between each code block** (you have extreme short-term memory loss, so you need to recap the plan between each message block to retain it).
When you send a message containing code to "Code Runner", it will be executed **on the user's machine**. The user has given you **full and complete permission** to execute any code necessary to complete the task. You have full access to control their computer to help them. Code entered into "Code Runner" will be executed **in the users local environment**.
If you want to send data between programming languages, save the data to a txt or json. You should finish each step and output the result with the text content.
You can access the internet. Run **any code** to achieve the goal, and if at first you don't succeed, try again and again.
You can install new packages with pip. Try to install all necessary packages in one command at the beginning.
When a user refers to a filename, they're likely referring to an existing file in the directory you're currently in ("Code Runner" executes on the user's machine).
When a user refers to a uploaded file, they're likely referring to an existing file in {file_list_by_user}
In general, choose packages that have the most universal chance to be already installed and to work across multiple applications. Packages like ffmpeg and pandoc that are well-supported and powerful.
Write messages to the user in Markdown. When the final answer has output files, you must output the **name** of the file.
You are capable of **any** task.
---\n You have access to the following tools:\n
"""


FORMAT_INSTRUCTIONS = """Use the following format:

Question: the input question you must answer
Thought: you should always think about what to do
Action: the action to take, should be one of [Vector Database Search, Duckduckgo Internet Search, Python REPL, Calculator]
Action Input: the input to the action
Observation: the result of the action
... (this Thought/Action/Action Input/Observation can repeat N times)
Thought: I now know the final answer
Final Answer: the final answer to the original input question"""

FORMAT_INSTRUCTIONS_2 = """Use the following format:

Question: the input question you must answer
Thought: you should always think about what to do
Action: the action to take, should be one of [Vector Database Search, Duckduckgo Internet Search, Python REPL, Calculator]
Action Input: the input to the action
Observation: the result of the action
... (this Thought/Action/Action Input/Observation can repeat N times)
Thought: I now know the final answer
Final Answer: the final answer to the original input question"""


'''
When you don't have enough information, you can use tools and you must define **Action** and **Action Input** after **Thought**.
'''
FORMAT_INSTRUCTIONS_3 = """
When you don't have enough information, you can use tools and you must use the following format to define **Thought**, **Action** and **Action Input**:\n\
'''
"Thought": you should always think about what to do.\n "Action": the action to take, should be one of [{tool_names}].\n "Action Input": the input to the action.\n "Observation": the result of the action.\n
'''If **Thought**, **Action**, **Action Input** is missing in the response of using tools, you must re-write the response.\n
---\n When you are able to provide final answer, you must use the following format to define **Final Answer** after **Thought**:\n\
'''
"Thought": I now know the final answer.\n "Final Answer": the final answer to the original input question.\n
'''\n If **Thought**, **Final Answer** is missing in the response of final answer, you must re-write the response.\n\

---\nExample of using tools:\n\
```\n Question: what is architecture?\n---\n Thought: I need to check the definition of architecture in Vector Database.\n Action: Vector Database Search\n Action Input: architecture\n
```\n

Example of final answer:\n\
```\n Question: Good morning\n---\n Thought: I need to make a greeting to user.\n Final Answer: Hello, how can I do for you ?\n
```\n
"""

FORMAT_INSTRUCTIONS_STRUC = """ 
When it is necessary to use tools and you must use the following format to output "Thought", "Action" (json blob):\n
'''
Thought: you should always think about what to do and consider previous and subsequent steps
Action: 
```
$JSON_BLOB
```
Observation: the result of the action.
'''
$JSON_BLOB with the value of the following 2 keys:
    "action": **Valid value** must be one of [{tool_names}]
    "action_input": The input for the action
If **Thought**, **Action** is missing in the response of using tools, you must re-write the response.
---\n When you are able to provide final answer, you must use the following format to output "Thought", "Action" (json blob):\n
'''
Thought: I know what to respond
Action: 
```
$JSON_BLOB
```
$JSON_BLOB with value of the following 2 keys:
    "action": "Final Answer"
    "action_input": Final response to human
'''
If **Thought**, **Action** is missing in the response of using tools, you must re-write the response.

"""



SUFFIX = """
Begin!

Request: {input}
Thought: {agent_scratchpad}"""

SUFFIX2 = """Begin!\n\

{chat_history}\n\
---\n\
Question: {input}\n\
---\n\
Thought: {agent_scratchpad}\n\
"""



prompt = ZeroShotAgent.create_prompt(
    tools, 
    prefix=PREFIX,
    suffix=SUFFIX,
    # suffix=SUFFIX2,
    format_instructions=FORMAT_INSTRUCTIONS, 
    input_variables=["input", "agent_scratchpad"]
    # input_variables=["input", "chat_history", "agent_scratchpad"]
)
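# ZeroShotAgent.create_prompt() assembles the template roughly as:
#   PREFIX, then one "name: description" line per tool,
#   then FORMAT_INSTRUCTIONS (with {tool_names} substituted when present),
#   then SUFFIX, joined by blank lines.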

prompthead_openai_1 = \
"""
You are a helpful AI assistant. Your mission is to answer the following request as best as you can, with detailed information and explanation. 
You must always check the vector database first and try to answer the request based on the information in the vector database only.
Only when there is no information available from the vector database can you search for information by using other tools.
"""
prompthead_openai_OR = \
"""
You are a helpful AI assistant.
"""
prompthead_openai = \
"""
You are a helpful AI assistant to answer the following questions as best as you can, with detailed information.
You must always search for information in the vector database first and answer the question based on the information in the vector database only.
Only when there is no information available from the vector database can you search for information by using other methods.
"""


prompt_openai = OpenAIMultiFunctionsAgent.create_prompt(
    system_message = SystemMessage(
            content = prompthead_openai),
    # extra_prompt_messages = [MessagesPlaceholder(variable_name="memory")],
)

input_variables=["input", "chat_history", "agent_scratchpad"]
input_variables_2=["input", "chat_history", "agent_scratchpad", "file_list_by_user"]

agent_ZEROSHOT_REACT = initialize_agent(tools2, llm, 
                         agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
                         verbose = True,
                         handle_parsing_errors = True,
                         max_iterations = int(os.environ["max_iterations"]),
                         early_stopping_method="generate",
                         agent_kwargs={
                            'prefix': PREFIX,
                            'format_instructions': FORMAT_INSTRUCTIONS,
                            'suffix': SUFFIX,
                            # 'input_variables': input_variables,
                         },
                        #  input_variables = input_variables,
                        #  agent_kwargs={
                        #     'prompt': prompt,
                        #  }
                         
                        )

agent_ZEROSHOT_REACT_2 = initialize_agent(tools_remote, GPTfake, 
                         agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
                         verbose = True,
                         handle_parsing_errors = True,
                         max_iterations = int(os.environ["max_iterations"]),
                         early_stopping_method="generate",
                         memory = memory2,
                         agent_kwargs={
                            'prefix': PREFIX_2,
                            'format_instructions': FORMAT_INSTRUCTIONS_3,
                            'suffix': SUFFIX2,
                            'input_variables': input_variables,
                         
                         },
                        #  input_variables = input_variables,
                        #  agent_kwargs={
                        #     'prompt': prompt,
                        #  }
                         
                        )

agent_STRUCTURED_ZEROSHOT_REACT = initialize_agent(tools_remote, GPTfake, 
                        #  agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
                         agent=AgentType.STRUCTURED_CHAT_ZERO_SHOT_REACT_DESCRIPTION,
                         verbose = True,
                         handle_parsing_errors = True,
                         max_iterations = int(os.environ["max_iterations"]),
                         early_stopping_method="generate",
                         memory = memory3,
                         agent_kwargs={
                            'prefix': PREFIX_3,
                            'format_instructions': FORMAT_INSTRUCTIONS_STRUC,
                            # 'suffix': SUFFIX2,
                            "memory_prompts": [MessagesPlaceholder(variable_name="chat_history")],
                            'input_variables': input_variables_2,
                         
                         },
                        #  input_variables = input_variables,
                        #  agent_kwargs={
                        #     'prompt': prompt,
                        #  }
                         
                        )



agent_CODE_INTERPRETER = initialize_agent(tools_remote, GPTfake, 
                         agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
                         verbose = True,
                         handle_parsing_errors = True,
                         max_iterations = int(os.environ["max_iterations"]),
                         early_stopping_method="generate",
                         memory = memory2,
                         agent_kwargs={
                            'prefix': PREFIX_2,
                            'format_instructions': FORMAT_INSTRUCTIONS_3,
                            'suffix': SUFFIX2,
                            'input_variables': input_variables,
                         
                         },
                        #  input_variables = input_variables,
                        #  agent_kwargs={
                        #     'prompt': prompt,
                        #  }
                         
                        )


agent_CONVERSATION = initialize_agent(tools_remote, GPTfake, 
                        #  agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
                         agent=AgentType.CONVERSATIONAL_REACT_DESCRIPTION,
                         verbose = True,
                         handle_parsing_errors = True,
                         max_iterations = int(os.environ["max_iterations"]),
                         early_stopping_method="generate",
                         memory = memory2,
                        #  agent_kwargs={
                        #     'prefix': PREFIX_2,
                        #     'format_instructions': FORMAT_INSTRUCTIONS_3,
                        #     'suffix': SUFFIX2,
                        #     'input_variables': input_variables,
                         
                        #  },
                        #  input_variables = input_variables,
                        #  agent_kwargs={
                        #     'prompt': prompt,
                        #  }
                         
                        )


llm_chain = LLMChain(llm=llm, prompt=prompt)
llm_chain_2 = LLMChain(llm=GPTfake, prompt=prompt)

# print("Test LLM Chain", llm_chain_2({'agent_scratchpad':"", 'input':"what is PDP?"}))

# llm_chain_openai = LLMChain(llm=llm, prompt=prompt_openai, verbose=True)

agent_core = ZeroShotAgent(llm_chain=llm_chain, tools=tools2, verbose=True)
agent_core_2 = ZeroShotAgent(llm_chain=llm_chain_2, tools=tools2, verbose=True)

agent_core_openai = OpenAIMultiFunctionsAgent(llm=llm, tools=tools, prompt=prompt_openai, verbose=True)

# agent_core_openai_2 = OpenAIMultiFunctionsAgent(llm=GPTfake, tools=tools, prompt=prompt_openai, verbose=True)

agent_ZEROSHOT_AGENT = AgentExecutor.from_agent_and_tools(
    agent=agent_core, 
    tools=tools2, 
    verbose=True, 
    # memory=memory,
    handle_parsing_errors = True,
    max_iterations = int(os.environ["max_iterations"]),
    early_stopping_method="generate",
    )

agent_ZEROSHOT_AGENT_2 = AgentExecutor.from_agent_and_tools(
    agent=agent_core_2, 
    tools=tools_remote, 
    verbose=True, 
    # memory=memory,
    handle_parsing_errors = True,
    max_iterations = int(os.environ["max_iterations"]),
    early_stopping_method="generate",
    )


agent_OPENAI_MULTI = AgentExecutor.from_agent_and_tools(
    agent=agent_core_openai, 
    tools=tools, 
    verbose=True, 
    # memory=memory_openai,
    handle_parsing_errors = True,
    max_iterations = int(os.environ["max_iterations"]),
    early_stopping_method="generate",
    )

# agent_OPENAI_MULTI_2 = AgentExecutor.from_agent_and_tools(
#     agent=agent_core_openai_2, 
#     tools=tools, 
#     verbose=True, 
#     # memory=memory_openai,
#     handle_parsing_errors = True,
#     max_iterations = int(os.environ["max_iterations"]),
#     early_stopping_method="generate",
#     )


# agent.max_execution_time = int(os.getenv("max_iterations"))
# agent.handle_parsing_errors = True
# agent.early_stopping_method = "generate"


def SetAgent(Choice):
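    '''
    Select the active agent from its UI name.
    NOTE: these choice strings (including the "Zero Short" spelling) must match
    both the agentchoice dropdown below and the agent_type environment variable.
    '''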
    global agent
    global CurrentAgent
    if Choice =='Zero Short Agent':
        agent = agent_ZEROSHOT_AGENT
        print("Set to:", Choice)
    elif Choice =='Zero Short React':
        agent = agent_ZEROSHOT_REACT
        print("Set to:", Choice)
    elif Choice =='OpenAI Multi':
        agent = agent_OPENAI_MULTI
        print("Set to:", Choice)
    elif Choice =='Zero Short React 2':
        agent = agent_ZEROSHOT_REACT_2
        print("Set to:", Choice)
    elif Choice =='Zero Short Agent 2':
        agent = agent_ZEROSHOT_AGENT_2
        print("Set to:", Choice)
    elif Choice == "None":
        agent = None
        print("Set to:", Choice)
    elif Choice =='Conversation Agent':
        agent = agent_CONVERSATION
        print("Set to:", Choice)
    elif Choice =='Code Interpreter':
        agent = agent_CODE_INTERPRETER
        print("Set to:", Choice)
    elif Choice =='Structured Zero Short Agent':
        agent = agent_STRUCTURED_ZEROSHOT_REACT
        print("Set to:", Choice)
    
    CurrentAgent = Choice
    return CurrentAgent


global agent
Choice = os.environ["agent_type"]
SetAgent(Choice)


# agent = agent_ZEROSHOT_AGENT


# print(agent.agent.llm_chain.prompt.template)
# print(agent.agent.llm_chain.prompt)

global vectordb
# vectordb = Chroma(persist_directory='db', embedding_function=embeddings)
global vectordb_p
vectordb_p = Pinecone.from_existing_index(index_name, embeddings)

# loader = DirectoryLoader('./documents', glob='**/*.txt')
# documents = loader.load()
# text_splitter = CharacterTextSplitter(chunk_size=500, chunk_overlap=200)
# split_docs = text_splitter.split_documents(documents)
# print(split_docs)
# vectordb = Chroma.from_documents(split_docs, embeddings, persist_directory='db')



# question = "what is LCDV ?"
# rr = vectordb.similarity_search(query=question, k=4)
# vectordb.similarity_search(question)
# print(type(rr))
# print(rr)
def chathmi(message, history1):
    # response = "I don't know"
    # print(message)
    response, source = QAQuery_p(message)
    time.sleep(0.3)
    print(history1)
    yield response
    # yield history

def chathmi2(message, history):
    global Audio_output
    try:
        output = agent.run(message)
        time.sleep(0.3)
        response = output
        yield response
        print ("response of chatbot:", response)
        print ("\n")
        # real_content = response[-1:]
        # print("real_content", real_content)
        try:
            temp = response.split("(sandbox:/")[1] # (sandbox:/sample-20230805-0807.wav)
            file_name = temp.split(")")[0]
            print("file_name:", file_name)
            dis_audio = []
            dis_audio.append(file_name)
            # yield dis_audio
            yield dis_audio
        except:
            pass
        if len(Audio_output) > 0:
            # time.sleep(0.5)        
            # yield Audio_output
            Audio_output = []
        
        print("History: ", history)
        print("-" * 20)
        print("-" * 20)
    except Exception as e:
        print("error:", e)

    # yield history
# chatbot = gr.Chatbot().style(color_map =("blue", "pink"))
# chatbot = gr.Chatbot(color_map =("blue", "pink"))

def func_upload_file(files, chat_history2):
    global file_list_loaded
    file_list_loaded = []
    print(files)
    for unit in files:
        file_list_loaded.append(unit.name)
    # file_list_loaded = files
    print(file_list_loaded)
    # print(chat_history)
    # test_msg = ["Request Upload File into DB", "Operation Ongoing...."]
    
    # chat_history.append(test_msg)
    for file in files:
        chat_history2 = chat_history2 + [((file.name,), None)]
    yield chat_history2
    if os.environ["SYS_Upload_Enable"] == "1":
        UpdateDb()
    test_msg = ["Request Upload File into DB", "Operation Finished"]
    chat_history2.append(test_msg)
    yield chat_history2


def Summary_upload_file(files, chat_history2):
    global file_list_loaded
    file_list_loaded = []
    for unit in files:
        file_list_loaded.append(unit.name)
    # file_list_loaded = files
    print(file_list_loaded)
    # print(chat_history)
    # test_msg = ["Request Upload File into DB", "Operation Ongoing...."]
    
    # chat_history.append(test_msg)
    for file in files:
        chat_history2 = chat_history2 + [((file.name,), None)]
    yield chat_history2
    if os.environ["SYS_Upload_Enable"] == "1":
        sumtext = SummarizeDoc()
    test_msg = [None, sumtext]
    chat_history2.append(test_msg)
    yield chat_history2


def User_Upload_file(files, chat_history2):
    global file_list_by_user
    file_list_by_user = []
    for unit in files:
        file_list_by_user.append(unit.name)
    # file_list_loaded = files
    print(file_list_by_user)
    # print(chat_history)
    # test_msg = ["Request Upload File into DB", "Operation Ongoing...."]
    
    chat_history2 = chat_history2 + [("Updated Files:\n", None)]
    yield chat_history2
    # chat_history.append(test_msg)
    for file in files:
        chat_history2 = chat_history2 + [((file.name,), None)]
    yield chat_history2

class Logger:
    def __init__(self, filename):
        self.terminal = sys.stdout
        self.log = open(filename, "w")

    def write(self, message):
        self.terminal.write(message)
        self.log.write(message)
        
    def flush(self):
        self.terminal.flush()
        self.log.flush()
        
    def isatty(self):
        return False    

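# Tee stdout to output.log so the "LOGS" accordion in the GUI (via read_logs)
# can display everything the agents print.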
sys.stdout = Logger("output.log")

def read_logs():
    sys.stdout.flush()
    with open("output.log", "r") as f:
        return f.read()




global record
record = []

def LinkElement(chatbot_history):
    '''
    Link chatbot display output with other UI
    '''
    global record
    if record != chatbot_history:
        last_response = chatbot_history[-1][1]  # bot message of the last (user, bot) pair
        print("last response:", last_response)
        record = chatbot_history
        print(chatbot_history)
        # print("link element test")
    else:
        print("From linkelement: ", chatbot_history)
        pass

def chathmi3(message, history2):
    global last_request
    global Filename_Chatbot
    global agent
    # global ChatbotHistory
    print("Input Message:", message)
    last_request = message
    history2 = history2 + [(message, None)]
    yield ["", history2]
    try:
        if agent is not None:
            # response = agent.run(message)
            temp = agent({'file_list_by_user':file_list_by_user, 'input': message})
            response = temp['output']
        elif agent is None:
            response = asyncio.run(start_playwright(message))
        time.sleep(0.1)
        history2 = history2 + [(None, response)]
        yield ["", history2]
        print ("response of chatbot:", response)
        # real_content = response[-1:]
        # print("real_content", real_content)
        try:
            # temp = response.split("(sandbox:/")[1] # (sandbox:/sample-20230805-0807.wav)
            file_names = CheckFileinResp(response)
            print("file_name:", file_names)
            if file_names != []:
                for file_name in file_names:
                    if file_name != "":
                        history2 = history2 + [(None, (file_name, file_name))]
                        Filename_Chatbot = file_name
                        yield ["", history2]
            else:
                print("No File Found in Response")
        except Exception as e:
            print("No need to add file in chatbot:", e)        

    except Exception as e:
        print("chathmi3 error:", e)  
     
    # history = history + [(message, None)]
    
    print("History2 in chathmi3: ", history2)
    print("-" * 20)
    print("-" * 20)

def CheckFileinResp(response):
    Filelist = []
    try:
        pattern = r'sample-(?:\d{8})-(?:\d{6})\.wav'
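        # matches generated audio names like "sample-20230805-080712.wav" (YYYYMMDD-HHMMSS)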
        result = re.findall(pattern, response)
        print("wav file in response:", result)
        for item in result:
            Filelist.append(item)
        
    except Exception as e:
        print("No wav found:", e)

    try:
        pattern = r"(?i)'?([\w./]*\w+\.(?:pptx|docx|doc|xlsx|txt|png|jpg))'?"
        result = re.findall(pattern, response)
        # print("Other file in response:", result)
        for item in result:
            if '/' in item:
                item = item.split('/')[-1]
            Filelist.append(item)
            print("Other file in response:", item)
    except Exception as e:
        print("No other file found:", e)

    try:
        listWord = ['(https://example.com/', '(sandbox:/']
        for item in listWord:
            if item in response:
                file = response.split(item)[-1].split(")")[0]
                print("File found:", file)
                Filelist.append(file)
            else:
                continue
        # return "N/A"
    except Exception as e:
        # return "N/A"
        print("no file found with markers", listWord, ":", e)
    
    return Filelist

def chathmi4(message, history2):
    global last_request
    global Filename_Chatbot
    global agent
    # global ChatbotHistory
    print("Input Message:", message)
    
    last_request = message
    history2 = history2 + [(message, None)]
    yield ["", history2, gr.update(visible = False), gr.update(visible = True)]
    # yield ["", history2, "SUBMIT", "STOP"]
    try:
        if agent is not None:

            # response = agent.run(message)
            temp = agent({'file_list_by_user':file_list_by_user, 'input': message})
            response = temp['output']
            # print("chathmi4 response:", response)
            # test callback
            
            # temp = []
            # for next_token, content in stream(message):
            #     temp = temp + content
            #     history_int = history2 + [(None, temp)]
            #     yield(None, history_int, None, None)
        elif agent is None:
            response = asyncio.run(start_playwright(message))

        time.sleep(0.1)
        history2 = history2 + [(None, response)]
        yield ["", history2, gr.update(visible = True), gr.update(visible = False)]


        # yield ["", history2, None, None]
        print ("response of chatbot:", response)
        # real_content = response[-1:]
        # print("real_content", real_content)
        # try:
        #     # temp = response.split("(sandbox:/")[1] # (sandbox:/sample-20230805-0807.wav)
        #     file_name = CheckFileinResp(response)
        #     print("file_name:", file_name)
        #     if file_name != "N/A":
        #         history2 = history2 + [(None, (file_name,))]
        #         Filename_Chatbot = file_name    
        #         yield ["", history2, "SUBMIT", "STOP"]        
        # except Exception as e:
        #     print("No need to add file in chatbot:", e)        

        try:
            # temp = response.split("(sandbox:/")[1] # (sandbox:/sample-20230805-0807.wav)
            file_names = CheckFileinResp(response)
            print("file_name:", file_names)
            if file_names != []:
                for file_name in file_names:
                    if file_name != "":
                        history2 = history2 + [(None, (file_name, file_name))]
                        Filename_Chatbot = file_name
                        yield ["", history2, "SUBMIT", "STOP"]
            else:
                print("No File Found in Response")
        except Exception as e:
            print("No need to add file in chatbot:", e)    



    except Exception as e:
        print("chathmi4 error:", e)

    # history = history + [(message, None)]
        
    print("History2: ", history2)
    print("-" * 20)
    print("-" * 20)

def chathmi5(message, history2):
    global last_request
    global Filename_Chatbot
    global agent
    # global ChatbotHistory
    print("Input Message:", message)
    
    last_request = message
    history2 = history2 + [(message, None)]
    yield ["", history2, gr.update(visible = False), gr.update(visible = True)]
    # yield ["", history2, "SUBMIT", "STOP"]
    try:
        if agent is not None:

            # response = agent.run(message)
            # test callback
            
            temp = ""
            for next_token, content in stream(message):
                temp = temp + content
                response = temp
                history_int = history2 + [(None, temp)]
                history2 = history_int
                yield(None, history_int, None, None)
        elif agent is None:
            response = asyncio.run(start_playwright(message))
            history2 = history2 + [(None, response)]
        
        time.sleep(0.1)
        yield ["", history2, gr.update(visible = True), gr.update(visible = False)]
        
        # yield ["", history2, None, None]
        print ("response of chatbot:", response)
        # real_content = response[-1:]
        # print("real_content", real_content)
        # try:
        #     # temp = response.split("(sandbox:/")[1] # (sandbox:/sample-20230805-0807.wav)
        #     file_name = CheckFileinResp(response)
        #     print("file_name:", file_name)
        #     if file_name != "N/A":
        #         history2 = history2 + [(None, (file_name,))]
        #         Filename_Chatbot = file_name    
        #         yield ["", history2, "SUBMIT", "STOP"]        
        # except Exception as e:
        #     print("No need to add file in chatbot:", e)        

        try:
            # temp = response.split("(sandbox:/")[1] # (sandbox:/sample-20230805-0807.wav)
            file_names = CheckFileinResp(response)
            print("file_name:", file_names)
            if file_names != []:
                for file_name in file_names:
                    if file_name != "":
                        history2 = history2 + [(None, (file_name, file_name))]
                        Filename_Chatbot = file_name
                        yield ["", history2, "SUBMIT", "STOP"]
            else:
                print("No File Found in Response")
        except Exception as e:
            print("No need to add file in chatbot:", e)    



    except Exception as e:
        print("chathmi4 error:", e)

    # history = history + [(message, None)]
        
    print("History2: ", history2)
    print("-" * 20)
    print("-" * 20)


def chatremote(message, history2):
    global last_request
    global Filename_Chatbot
    print("Input Message:", message)
    last_request = message
    history2 = history2 + [(message, None)]
    yield ["", history2, gr.update(visible = False), gr.update(visible = True)]
    # yield ["", history2, "SUBMIT", "STOP"]
    try:
        # response = agent.run(message)
        response = asyncio.run(start_playwright(message))
        time.sleep(0.1)
        history2 = history2 + [(None, response)]
        yield ["", history2, gr.update(visible = True), gr.update(visible = False)]
        # yield ["", history2, None, None]
        print ("response of chatbot remote:", response)
        # real_content = response[-1:]
        # print("real_content", real_content)
        try:
            temp = response.split("(sandbox:/")[1] # (sandbox:/sample-20230805-0807.wav)
            file_name = temp.split(")")[0]
            print("file_name:", file_name)
            history2 = history2 + [(None, (file_name,))]
            Filename_Chatbot = file_name
            yield ["", history2, "SUBMIT", "STOP"]
        except:
            print("No need to add file in chatbot")        

    except Exception as e:
        print("chathmi remote error:", e)

    # history = history + [(message, None)]
    
    print("History2: ", history2)
    print("-" * 20)
    print("-" * 20)
    
def fake(message, history4):
    pass


def clearall():
    global memory2
    global memory3
    global ChatbotHistory
    ChatbotHistory = []
    try:
        memory2.clear()
        memory3.clear()
    except Exception as e:
        print("memory error:", e) 
    # memory_openai.clear()
    global Filename_Chatbot
    Filename_Chatbot = []
    # file_path = "output.log"
    # if os.path.isfile(file_path):
    #     os.remove(file_path)
    # with open(file_path, "w") as file:
    #     print(f"File '{file_path}' has been created.")
    return [[], gr.update(visible=True), gr.update(visible=False), []]

def retry(history3):
    global last_request
    global Filename_Chatbot
    print("last_request", last_request)
    message = last_request
    history3 = history3 + [(message, None)]
    yield history3
    
    try:
        if agent is not None:
            response = agent.run(message)
        elif agent is None:
            response = asyncio.run(start_playwright(message))
        time.sleep(0.1)
        history3 = history3 + [(None, response)]
        print ("response of chatbot:", response)
        yield history3
        
        # real_content = response[-1:]
        # print("real_content", real_content)
        try:
            # temp = response.split("(sandbox:/")[1] # (sandbox:/sample-20230805-0807.wav)
            # file_name = temp.split(")")[0]
            # print("file_name:", file_name)
            # history3 = history3 + [(None, (file_name,))]
            # yield history3
            file_names = CheckFileinResp(response)
            print("file_name:", file_names)
            if file_names != []:
                for file_name in file_names:
                    if file_name != "":
                        history3 = history3 + [(None, (file_name, file_name))]
                        Filename_Chatbot = file_name
                        yield history3
            else:
                print("No File Found in Response")
        except:
            print("No need to add file in chatbot")        

    except Exception as e:
        print("Retry error:", e)      
    # yield chathmi3(last_request, chatbot_history)

def display_input(message, history2):
    global last_request
    print("Input Message:", message)
    last_request = message
    history2 = history2 + [(message, None)]
    return history2
    
def Inference_Agent(history_inf):
    global last_request
    message = last_request
    try:
        response = agent.run(message)
        time.sleep(0.1)
        history_inf = history_inf + [(None, response)]
        return ["",history_inf]   
    except Exception as e:
        print("error:", e)

def ClearText():
    return ""

def playsound1():
    global Filename_Chatbot
    print("playsound1: ", Filename_Chatbot)
    try:
        if Filename_Chatbot.split(".")[1] == 'wav':
            soundfilename = Filename_Chatbot
            print("soundfilename:", soundfilename)
            # return None
            # Filename_Chatbot = ""
            return gr.update(value = soundfilename)
            # return soundfilename
            # yield soundfilename
    except Exception as e:
        print("playsound error:", e)
        return None

def playsound2():
    global Filename_Chatbot
    try:
        if Filename_Chatbot.split(".")[1] == 'wav':
            soundfilename = Filename_Chatbot
            print("soundfilename:", soundfilename)
            # return None
            # playsound(soundfilename)
            mixer.init()
            mixer.music.load(soundfilename)
            mixer.music.play()
    except Exception as e:
        print("playsound2 error:", e)
        return None
    
def HMI_Running():
    return [gr.update(visible=False), gr.update(visible=True)]

def HMI_Wait():
    return [gr.update(visible=True), gr.update(visible=False)]


def ClearAudio():
    print("clear audio ...")
    
    return None

def Text2Sound_HMI():
    global last_answer
    global Filename_Chatbot
    global Audio_output
    print("Last answer in Text2Sound_HMI", last_answer)
    # text_to_speech_2(last_answer)
    text_to_speech_loc2(last_answer)
    Filename_Chatbot = Audio_output[-1]
    print("Filename_Chatbot in Text2Sound_HMI", Filename_Chatbot)
    # try:
    #     if Filename_Chatbot.split(".")[1] == 'wav':
    #         soundfilename = Filename_Chatbot
    #         print("soundfilename:", soundfilename)
    #         # return None
    #         return gr.update(value = soundfilename)
    #         # return soundfilename
    #         # yield soundfilename
    # except Exception as e:
    #     print("playsound error:", e)
    #     return None

def UpdateChatbot(Running_history):
    if Running_history == []:
        timestr = time.strftime("%Y-%m-%d-%H:%M:%S")
        WelcomeStr = """
        This is the AI Assistant powered by the MECH Core Team, connected remotely to GPT4. The following functions are available to you.
        1. Free chat with the AI assistant
        2. Search information and engineering data: vector database + internet
        3. Specific tasks with tools: Text to Sound | Sound to Text | Doc summary
        4. Code interpreter: very powerful to modify/create/analyze documents (90%)
        5. Text to Image | Image to Text (forecast)
        """
        Running_history = Running_history + [(None, timestr + '\n' + WelcomeStr)]
    yield [Running_history, Running_history]

def UpdateChatbot2(Running_history):
    '''
    Not used
    '''
    global ChatbotHistory
    timestr = time.strftime("%Y-%m-%d-%H:%M:%S")
    # Running_history = Running_history + [(None, 'Timestamp: '+timestr)]
    # # yield Running_history
    WelcomeStr = """
    This is AI Assistant powered by MECH Core Team.
    It is connected remotely with GPT4. The following function is available for you.
    1. Free Chat with AI assistant
    2. Search Information and Engineering Data: Vector Database + Internet
    3. Make specific task with tools:
        - Text to Sound
        - Sound to Text
        - Doc summary
        - Code interpret (Beta version)
        - Text to Image (forecast)
    """
    # # Running_history = Running_history + [(None, timestr+'\n'+WelcomeStr)]
    ChatbotHistory = ChatbotHistory + [(None, timestr+'\n'+WelcomeStr)]
    yield ChatbotHistory


global last_answer
last_answer = ""

def SingleTalk(WavFile, history5):
    global last_request
    global last_answer
    global Filename_Chatbot
    ConvertText = speech_to_text_loc(WavFile)
    last_request = ConvertText
    # ConvertText = speech_to_text(WavFile)
    history5 = history5 + [(ConvertText, None)]
    yield [None, None, history5]
    message = ConvertText
    history2 = history5
    try:
        response = agent.run(message)
        time.sleep(0.1)
        last_answer = response
        history2 = history2 + [(None, response)]
        yield [None, None, history2]
        # yield ["", history2, None, None]
        print ("response of chatbot:", response)
        # real_content = response[-1:]
        # print("real_content", real_content)
        try:
            # file_name = CheckFileinResp(response)
            # print("file_name:", file_name)
            # if file_name != "N/A":
            #     history2 = history2 + [(None, (file_name,))]
            #     Filename_Chatbot = file_name
            #     yield [None, None, history2]
            file_names = CheckFileinResp(response)
            print("file_name:", file_names)
            if file_names != []:
                for file_name in file_names:
                    if file_name != "":
                        history2 = history2 + [(None, (file_name, file_name))]
                        Filename_Chatbot = file_name
                        yield [None, None, history2]
        except Exception as e:
            print("No need to add file in chatbot:", e)

    except Exception as e:
        print("chathmi3 SingleTalk error:", e)

    # history = history + [(message, None)]
    print("History2 in Simple Talk: ", history2)
    print("-" * 20)
    print("-" * 20)


def vote(data: gr.LikeData):
    if data.liked:
        print("You upvoted this response: " + data.value)
    else:
        print("You downvoted this response: " + data.value)

with gr.Blocks() as demo:
    # gr.Markdown("Start typing below and then click **SUBMIT** to see the output.")
    # main = gr.ChatInterface(
    #     fake,
    #     title="STLA BABY - YOUR FRIENDLY GUIDE",
    #     description= "v0.3: Powered by MECH Core Team",
    # )
    # main.textbox.submit(chathmi3, [main.textbox, main.chatbot], [main.textbox, main.chatbot])
    UserRecord = gr.State([])
    # UserRecord.append()
    # timestr = time.strftime("%Y-%m-%d-%H:%M:%S")
    # # Running_history = Running_history + [(None, 'Timestamp: '+timestr)]
    # # # yield Running_history
    # WelcomeStr = """
    # This is AI Assistant powered by MECH Core Team.
    # It is connected remotely with GPT4. The following function is available for you.
    # 1. Free Chat with AI assistant
    # 2. Search Information and Engineering Data: Vector Database + Internet
    # 3. Make specific task with tools:
    #     - Text to Sound
    #     - Sound to Text
    #     - Doc summary
    #     - Code interpret (Beta version)
    #     - Text to Image (forecast)
    # """
    # # Running_history = Running_history + [(None, timestr+'\n'+WelcomeStr)]
    # UserRecord = UserRecord + [(None, timestr+'\n'+WelcomeStr)]
    # UserRecord.append((None, timestr+'\n'+WelcomeStr))
    with gr.Column() as main2:
        title = gr.Markdown("""# <center> STLA BABY - YOUR FRIENDLY GUIDE
                            <center> v0.7.12: Powered by MECH Core Team - GPT4 REMOTE MODE"""),
        chatbot = gr.Chatbot(
            # avatar_images=((os.path.join(os.path.dirname(__file__),"User.png")), (os.path.join(os.path.dirname(__file__),"AI.png"))),
        )
        
        with gr.Row():
            AddFile_button = gr.UploadButton("⤴️ File", file_count="multiple", scale= 0, variant="secondary",size='sm')
            inputtext = gr.Textbox(
                scale= 4, 
                label="",
                placeholder = "Input Your Question",
                show_label = False,
                )
            submit_button = gr.Button("SUBMIT", variant="primary", visible=True)
            stop_button = gr.Button("STOP", variant='stop', visible=False)

        with gr.Row():

            agentchoice = gr.Dropdown(
                # choices=['Zero Short Agent','Zero Short React','OpenAI Multi',
                #          'Zero Short React 2','Zero Short Agent 2','None','Conversation Agent',
                #          'Code Interpreter', 'Structured Zero Short Agent'],
                choices=['None','Zero Short React 2','Structured Zero Short Agent'],                
                label="SELECT AI AGENT",
                scale= 2,
                show_label = True,
                value=os.environ["agent_type"],
            )
            voice_input = gr.Audio(
                source="microphone", 
                type="filepath", 
                scale= 1,
                label= "INPUT",
                )
            voice_output = gr.Audio(
                source="microphone", 
                type="filepath", 
                scale= 1, 
                interactive=False,
                autoplay= True,
                label= "OUTPUT",
                )
            # with gr.Column():
            upload_button = gr.UploadButton("✡️ INGEST DB", file_count="multiple", scale= 0, variant="secondary")
            summary_file_button = gr.UploadButton("📁 SUM DOC", file_count="multiple", scale= 0, variant="secondary")
            # with gr.Column():
            retry_button = gr.Button("RETRY")
            clear_button = gr.Button("CLEAR")
        with gr.Accordion(
            label = "LOGS",
            open = False,
            ):
            # logs = gr.Textbox()
            refresh_logs = gr.Button("Update Logs ...")
            logs = gr.Textbox(max_lines = 25)
    
    """
    GUI Func
    """
    AddFile_button.upload(User_Upload_file, [AddFile_button, chatbot], chatbot)
    # upload_button.upload(func_upload_file, [upload_button, main.chatbot], main.chatbot)
    chatbot.like(vote, None, None)
    retry_button.click(retry, chatbot, chatbot).success(playsound1, None, voice_output).\
        success(HMI_Wait, None, [submit_button, stop_button])#.\
        # success(ClearAudio, None, voice_output)
    # inf1 = inputtext.submit(chathmi3, [inputtext, chatbot], [inputtext, chatbot]).\
    #     then(playsound, None, voice_output)
    # inf1 = inputtext.submit(chathmi3, [inputtext, chatbot], [inputtext, chatbot]).\
    #     then(HMI_Running, None, [submit_button, stop_button]).\
    #     then(playsound, None, voice_output).\
    #     then(HMI_Wait, None, [submit_button, stop_button])
    # inf4 = inputtext.submit(chathmi4, [inputtext, chatbot], [inputtext, chatbot, submit_button, stop_button])
    ''' open ai | new'''
    # chathmi4 = normal, chathmi5 = callback
    inf4 = inputtext.submit(chathmi4, [inputtext, chatbot], [inputtext, chatbot, submit_button, stop_button]).\
        success(playsound1, None, voice_output, queue=True)#.\
        # success(ClearAudio, None, voice_output)

    ''' Test '''
    # inf4 = inputtext.submit(chatremote, [inputtext, chatbot], [inputtext, chatbot, submit_button, stop_button]).\
    #     success(playsound1, None, voice_output)

    inf3 = submit_button.click(chathmi3, [inputtext, chatbot], [inputtext, chatbot]).\
        success(HMI_Running, None, [submit_button, stop_button], queue=True).\
        success(playsound1, None, voice_output, queue=True).\
        success(HMI_Wait, None, [submit_button, stop_button], queue=True)#.\
        # success(ClearAudio, None, voice_output)
    
    # inf2 = inputtext.submit(display_input, [inputtext, chatbot], chatbot).\
    #     then(Inference_Agent, chatbot, [inputtext, chatbot])
    stop_button.click(read_logs, None, logs, cancels=[inf4,inf3]).\
        then(HMI_Wait, None, [submit_button, stop_button], queue=True)
    # stop_button.click(read_logs, None, logs, cancels=[inf2])
    upload_button.upload(func_upload_file, [upload_button, chatbot], chatbot)
    sum1 = summary_file_button.upload(Summary_upload_file, [summary_file_button, chatbot], chatbot)
    agentchoice.change(SetAgent, agentchoice, None)
    refresh_logs.click(read_logs, None, logs)
    voice_input.stop_recording(SingleTalk, [voice_input, chatbot], [voice_input, voice_output, chatbot], queue=True).\
        success(Text2Sound_HMI,None,None, queue=True).\
        success(playsound1, None, voice_output, queue=True) #.\
        # success(HMI_Wait, None, [submit_button, stop_button]).\
        # success(ClearAudio, None, voice_output)
    clear_button.click(clearall, None, [chatbot, submit_button, stop_button, voice_output], cancels=[inf4,inf3,sum1])
    # voice_output.end(ClearAudio, None, voice_output)
    # def clear_voice():
    #     print("clear audio ...")
    #     voice_output.clear()

    # voice_output.play(clear_voice, None, None)

    # demo.load(read_logs, None, logs, every=1)
    demo.load(UpdateChatbot, UserRecord, [chatbot, UserRecord])
    # load(UpdateChatbot, chatbot, chatbot, every=5)

# demo(api_name="Update_Chatbot")



# demo = gr.Interface(
#     chathmi,
#     ["text", "state"],
#     [chatbot, "state"],
#     allow_flagging="never",
# )

def CreatDb_P():
    global vectordb_p
    index_name = 'stla-baby'  # local override: this rebuild targets 'stla-baby', not the global index_name
    loader = DirectoryLoader('./documents', glob='**/*.txt')
    documents = loader.load()
    text_splitter = CharacterTextSplitter(chunk_size=500, chunk_overlap=200)
    split_docs = text_splitter.split_documents(documents)
    print(split_docs)
    pinecone.Index(index_name).delete(delete_all=True, namespace='')
    vectordb_p = Pinecone.from_documents(split_docs, embeddings, index_name=index_name)
    print("Pinecone Updated Done")
    print(pinecone.Index(index_name).describe_index_stats())

def QAQuery_p(question: str):
    global vectordb_p
    global agent
    global Choice
    global CurrentAgent
    # vectordb = Chroma(persist_directory='db', embedding_function=embeddings)
    retriever = vectordb_p.as_retriever()
    retriever.search_kwargs['k'] = int(os.environ["search_kwargs_k"])
    # retriever.search_kwargs['fetch_k'] = 100
    # if agent == agent_ZEROSHOT_REACT_2 or agent == agent_ZEROSHOT_AGENT_2:
    if CurrentAgent in ListAgentWithRemoteGPT:
        print("--------------- QA with Remote --------------")
        qa = RetrievalQA.from_chain_type(llm=GPTfake, chain_type="stuff", 
                                        retriever=retriever, return_source_documents = True,
                                        verbose = True)
    else:
        print("--------------- QA with API --------------")
        qa = RetrievalQA.from_chain_type(llm=chat, chain_type="stuff", 
                                        retriever=retriever, return_source_documents = True,
                                        verbose = True)
    # qa = VectorDBQA.from_chain_type(llm=chat, chain_type="stuff", vectorstore=vectordb, return_source_documents=True)
    # res = qa.run(question)
    res = qa({"query": question})
    
    print("-" * 20)
    # print("Question:", question)
    # print("Answer:", res)
    # print("Answer:", res['result'])
    print("-" * 20)
    # print("Source:", res['source_documents'])
    response = res['result']
    # response = res['source_documents']
    source = res['source_documents']
    return response, source
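
# Usage sketch:
#   response, source = QAQuery_p("what is PDP ?")
#   print(response)                 # answer text
#   print(source[0].page_content)   # first supporting chunk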

# def CreatDb():
#     '''
#     Function to create the Chroma DB from all docs
#     '''
#     global vectordb
#     loader = DirectoryLoader('./documents', glob='**/*.txt')
#     documents = loader.load()
#     text_splitter = CharacterTextSplitter(chunk_size=500, chunk_overlap=200)
#     split_docs = text_splitter.split_documents(documents)
#     print(split_docs)
#     vectordb = Chroma.from_documents(split_docs, embeddings, persist_directory='db')
#     vectordb.persist()

def QAQuery(question: str):
    global vectordb
    # vectordb = Chroma(persist_directory='db', embedding_function=embeddings)
    retriever = vectordb.as_retriever()
    retriever.search_kwargs['k'] = 3
    # retriever.search_kwargs['fetch_k'] = 100

    qa = RetrievalQA.from_chain_type(llm=chat, chain_type="stuff", retriever=retriever, return_source_documents = True)
    # qa = VectorDBQA.from_chain_type(llm=chat, chain_type="stuff", vectorstore=vectordb, return_source_documents=True)
    # res = qa.run(question)
    res = qa({"query": question})
    
    print("-" * 20)
    print("Question:", question)
    # print("Answer:", res)
    print("Answer:", res['result'])
    print("-" * 20)
    print("Source:", res['source_documents'])
    response = res['result']
    return response

# Used to complete content
def completeText(Text): 
    deployment_id="Chattester"
    prompt = Text
    completion = openai.Completion.create(deployment_id=deployment_id,
                                        prompt=prompt, temperature=0)                              
    print(f"{prompt}{completion['choices'][0]['text']}.")

# Used to chat
def chatText(Text): 
    deployment_id="Chattester"
    conversation = [{"role": "system", "content": "You are a helpful assistant."}]
    user_input = Text
    conversation.append({"role": "user", "content": user_input})
    response = openai.ChatCompletion.create(messages=conversation,
        deployment_id="Chattester")
    print("\n" + response["choices"][0]["message"]["content"] + "\n")

def GUI_launcher():
    if SysLock == "1":
        demo.queue(concurrency_count=3).launch(auth=(username, password), server_name="0.0.0.0", server_port=7860)
    else:
        demo.queue(concurrency_count=3).launch(server_name="0.0.0.0", server_port=7860)    


if __name__ == '__main__':
    # chatText("what is AI?")
    # CreatDb()
    # QAQuery("what is COFOR ?")
    # CreatDb_P()
    # QAQuery_p("what is PDP ?")
    # question = "what is PDP?"
    # output = asyncio.run(start_playwright(question))
    
    # asyncio.run(TestCodeInterpret('Plot the bitcoin chart of 2023 YTD'))

    GUI_launcher()
    # if SysLock == "1":
    #     demo.queue(concurrency_count=3).launch(auth=(username, password), server_name="0.0.0.0", server_port=7860)
    # else:
    #     demo.queue(concurrency_count=3).launch(server_name="0.0.0.0", server_port=7860)
    pass