<!--
 * @Description:
 * @Version:
 * @Author: Hardy
 * @Date: 2022-02-09 15:13:53
 * @LastEditors: Hardy
 * @LastEditTime: 2022-02-09 16:59:01
-->

<br />
<p align="center">
  <h1 align="center">clip-product-title-chinese</h1>
</p>

## A CLIP model trained on Youzan product images and product titles


## Usage
Before using the model, run `git clone https://github.com/youzanai/trexpark.git` and execute the example below from the repository root, so that the `src.clip.clip` module can be imported.

```python
import requests
import torch
from PIL import Image

from src.clip.clip import ClipProcesserChinese, ClipChineseModel

# Load the processor and model weights from the Hugging Face Hub.
clip_processor = ClipProcesserChinese.from_pretrained('youzanai/clip-product-title-chinese')
model = ClipChineseModel.from_pretrained('youzanai/clip-product-title-chinese')

# Download a sample product image and define the candidate titles.
url = 'http://img.yzcdn.cn/upload_files/2015/04/21/0140dac4657f874f2acff9294b28088c.jpg'
img = Image.open(requests.get(url, stream=True).raw).convert('RGB')
imgs = [img]
# Candidate titles: sneakers, red dress, black dress, coat, stationery.
texts = ['运动鞋', '红色连衣裙', '黑色连衣裙', '大衣', '文具']

# Tokenize the titles and preprocess the images in a single call.
f = clip_processor(texts, imgs, return_tensors='pt', truncation=True, padding=True)
# The model's forward pass does not take token_type_ids, so drop them.
del f['token_type_ids']
with torch.no_grad():
    out = model(**f)
    logits_per_image, logits_per_text = out['logits_per_image'], out['logits_per_text']

# Softmax over the title axis gives per-image matching probabilities.
print(logits_per_image.softmax(dim=-1).cpu().detach().numpy())

# Result: [[1.1700666e-07 9.9948394e-01 5.1582896e-04 4.7687358e-11 6.9604440e-08]]
```
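
The softmax output above is, for each image, a probability distribution over the candidate titles. As a minimal follow-up sketch (not part of the official example; it reuses `logits_per_image` and `texts` from the snippet above), you can pick the best-matching title per image with `argmax`:

```python
# Continuing from the example above: rank the candidate titles per image.
probs = logits_per_image.softmax(dim=-1)  # shape: (num_images, num_titles)
best = probs.argmax(dim=-1)               # index of the best title for each image

for i, idx in enumerate(best.tolist()):
    print(f"image {i}: {texts[idx]} (p={probs[i, idx].item():.4f})")

# For the sample image this selects '红色连衣裙' (red dress),
# matching the 9.9948e-01 entry in the result above.
```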