|
333 | 333 | "venue": "NeurIPS 2023", |
334 | 334 | "links": { |
335 | 335 | "pdf": "https://arxiv.org/abs/2304.13013" |
336 | | - } |
| 336 | + }, |
| 337 | + "thumbnail": "/lowpres.png" |
337 | 338 | }, |
338 | 339 | { |
339 | 340 | "title": "Neural Priming for Sample-Efficient Adaptation", |
|
352 | 353 | "links": { |
353 | 354 | "pdf": "https://arxiv.org/abs/2306.10191", |
354 | 355 | "code": "https://github.com/RAIVNLab/neural-priming" |
355 | | - } |
| 356 | + }, |
| 357 | + "thumbnail": "/priming.png" |
356 | 358 | }, |
357 | 359 | { |
358 | 360 | "title": "Quilt-1M: One Million Image-Text Pairs for Histopathology", |
|
389 | 391 | "links": { |
390 | 392 | "pdf": "https://arxiv.org/abs/2305.03689", |
391 | 393 | "project page": "https://cs-people.bu.edu/array/research/cola/" |
392 | | - } |
| 394 | + }, |
| 395 | + "thumbnail": "/cola.png" |
393 | 396 | }, |
394 | 397 | { |
395 | 398 |         "title": "AR2-D2: Training a Robot Without a Robot",
|
457 | 460 | "venue": "VLDB 2023", |
458 | 461 | "links": { |
459 | 462 | "pdf": "https://arxiv.org/pdf/2301.00929" |
460 | | - } |
| 463 | + }, |
| 464 | + "thumbnail": "/equivocal.png" |
461 | 465 | }, |
462 | 466 | { |
463 | 467 | "title": "Large Language Model as Attributed Training Data Generator: A Tale of Diversity and Bias", |
|
476 | 480 | "links": { |
477 | 481 | "pdf": "https://arxiv.org/abs/2306.15895", |
478 | 482 | "code": "https://github.com/yueyu1030/AttrPrompt" |
479 | | - } |
| 483 | + }, |
| 484 | + "thumbnail": "/llm_training_data.png" |
480 | 485 | }, |
481 | 486 | { |
482 | 487 | "title": "TIFA: Text-to-Image Faithfulness Evaluation with Question Answering", |
|
494 | 499 | "links": { |
495 | 500 | "pdf": "https://arxiv.org/abs/2303.11897", |
496 | 501 | "project page": "https://tifa-benchmark.github.io/" |
497 | | - } |
| 502 | + }, |
| 503 | + "thumbnail": "/tifa.png" |
498 | 504 | }, |
499 | 505 | { |
500 | 506 | "title": "What does a platypus look like? Generating customized prompts for zero-shot image classification", |
|
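For reference, a minimal TypeScript sketch of the entry shape this diff implies. It is inferred only from the fields visible above; the interface name `Publication`, the optionality markers, and the `projectPage` helper are assumptions for illustration, not part of the repository.

```ts
// Hypothetical shape of one entry in the publications JSON, inferred from this diff.
// Only fields that appear in the hunks above are included.
interface Publication {
  title: string;
  venue: string;
  links: {
    pdf: string;              // arXiv abs/ or pdf/ URL
    code?: string;            // present on some entries only
    "project page"?: string;  // key contains a space, so it must stay quoted
  };
  thumbnail?: string;         // added in this commit, e.g. "/tifa.png"
}

// Because "project page" is not a valid identifier, access it with bracket notation:
const projectPage = (p: Publication): string | undefined => p.links["project page"];
```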