From 2eb5c976c210dd1d86c88bc17a0e4244f10543c3 Mon Sep 17 00:00:00 2001
From: Parthasarathy
Date: Sun, 16 Nov 2025 01:33:06 +0530
Subject: [PATCH 1/8] Add OpenRouter provider configuration to Add Model Form

---
 gui/public/logos/openrouter.png               |   Bin 0 -> 20641 bytes
 gui/src/forms/AddModelForm.tsx                |     1 +
 .../AddNewModel/configs/openRouterModel.ts    |   107 +
 .../AddNewModel/configs/openRouterModels.json | 15743 ++++++++++++++++
 .../pages/AddNewModel/configs/providers.ts    |    24 +
 5 files changed, 15875 insertions(+)
 create mode 100644 gui/public/logos/openrouter.png
 create mode 100644 gui/src/pages/AddNewModel/configs/openRouterModel.ts
 create mode 100644 gui/src/pages/AddNewModel/configs/openRouterModels.json

diff --git a/gui/public/logos/openrouter.png b/gui/public/logos/openrouter.png
new file mode 100644
index 0000000000000000000000000000000000000000..04f998a53af045ddb8a788c7c16a0aa57a6444e7
GIT binary patch
literal 20641
[20641 bytes of base85-encoded PNG data omitted]
z+^*e0JjloSfYmh9u@{aNu>g9s=;?Na;u52LR%(%y=$_YIH!UBAe!6Yde}+?0^%os}m) zw|tzx9t08Yf`>SW1o$EjB3P%XI0&Xtsjl4_7cuGs+^$0tVhiot8R3ca-jk z4pC&i1N(XUJ}Hm$GG;GJva7vMcFh`m zOd&F3rB!N|m#SpA-CzS1>G#0ta{UVMMUU{UW4%8X1_duqPXkEbVZ@0OkqKG-`!~+E z6s1X%i)8KBu+x<~yrp+xvmq!33sSgazM{BH_#F=0J0E@$8*hHlvkyzW5MKaJtg2wv zZEEQ~$r0y`woK2FGcs(oRG-O=?Uf&aAjVh(%Z&r|D9`rUUMf&M@}DNwY4winmNOu4 z3rE87lJNEt{0T&y<->V!!?}7okJ;N1Rq<|_Kw}+(N{c$Bb(NJe|TJx1E$f6^51-wLcskKIZT6xWexD6Ilfzt-FTq$-*F* zWlUnH(kPx8$<)$xRUfV^G|%?gF>xB7i*<`4X?66JCY6892R5agUA8G!vh<@Q>W>bG zj$be3+_Ko4SgmL8VsB0ytr)EBDR;J!$0$qmKA#*6yQJ>B8G5mG%f!|{ARsxizUJ@; zTs9bdJSRPQJ2^7>VqC@QKPFxe&OKA}S&{PQN<+#Givr*A@w%vY#p+J#;E5yZ>h8{$ z-`2)v$MMv?lPSJYJay~PXwk&v|7z!0o0__UL548ap&5r63lS(zt=3}E4mj4(fIxw9 zERR|OwBh9-gDFN@5dtZgXl9_O5Ic-PUIB>%;qt7EK?OoEm0~f3$b?5Rghwgi0wzLu zT_Ne-%U|fn=F`ody?gfT*}J=E&pBILGQe9fZ@yd6TLmkB|BU4rw~Xb!$HZ3EoNCVz zb%kGws%qYcmR%b7L%j5QWfpIBDFD<|W?`@MCSwqKQ-eFro+dpE3hGYinUP~MoDS<5^f3<)%dtQxVPp#($-mMkRe1pqSd zZyvIfja+^70ROy69lD(1@}>0k9QN!H$sg_j@rc-7Hk5wi!AZ`LeE@+~;;qP*<2HKg z2%Mv!{{sk4)!1E4+d0u2E?7jOq)5Y|$qsps9Y^U2@H3sb^!63tfc2m84A#Q74`{ z*v=@q2!4yCD2iS%hzGOcU#PC@h8 zZ@Sp#mz@5t4w~LX21(Q-?Lx6lT0IeP5Ih`>TP@OfO?`IB)ZR|(A&`<0PN8^jTERP{ zzhTHB4d_HQW8IRJUgoPwBa2{E8EJH;`%FBaV0MiX?>t6xB`GgD216n40`Wv43&Cm; z=T{$yjD#-RxX30cQr|DQ!=1ONTb1Xta3m2z5sK-?8)Tl^Y1;)y;UmJ^V*fO0J?EL5 z6223DVMOj#^VeKu4g%>tuDK2J5Pa8$%T9F3L@RiCYCJG^sRHfQcr7?wG;xBUOcGwn ziIm@nZ5W)_s2xu+>JFjk?KQ)9YgBGGp)`x1Y7d8~Wg~s69ezDj#S`rH%_lwjGmLZvT7?r71*>0EhIHCwEkJ}gM237c9t)L_gqHU~c8N=jkoN56 z`T^H%9sE`FN{d+Ou?cb2xZNbVJU|~0>zjzdD$YyYCNMCy&jUdv_{xaNK@4KLhxs@o zdm<`Fx!}=rDEO%N!G@gqHZM~^2o0(pNXX$^X$_vGWqY}OI1dN4No1g;RpM2SJ}##% zQtoXfFe+!J@G??2BkP)eoVLnPJ2zN#ks4z%dfz)_`LF>#5HlZVnUDVD0khUg^lZxz zCOrGyDuy{=?ciTWU)qIR>d3rs*KH#Zge()C1kJcOd@eWT!R_5Mwv=Yi(XaNU6 zB2j9v<-m#!J*3^jOtUx_WexIa_rsQTMoDy?&@vAF@?8=6vOBCYR3-S3_t(-=>>aJR zGqy1C=0NYbF%G}Lx}(bq&*SWQzte*5kRl6yV2E!g6~84pSO#{;d!EZ(x(0T^7lRY1 su*R#VwPB9(Sa|&Z_n%5YexZ2Fud(E}_?DA{cW}yQg2KLP4oJN5FCoC4ivR!s literal 0 HcmV?d00001 diff --git a/gui/src/forms/AddModelForm.tsx b/gui/src/forms/AddModelForm.tsx index 61a7142992e..b83d2a8945a 100644 --- a/gui/src/forms/AddModelForm.tsx +++ b/gui/src/forms/AddModelForm.tsx @@ -47,6 +47,7 @@ export function AddModelForm({ providers["gemini"]?.title || "", providers["azure"]?.title || "", providers["ollama"]?.title || "", + providers["openrouter"]?.title || "", ]; const allProviders = Object.entries(providers) diff --git a/gui/src/pages/AddNewModel/configs/openRouterModel.ts b/gui/src/pages/AddNewModel/configs/openRouterModel.ts new file mode 100644 index 00000000000..10ee5aa3d65 --- /dev/null +++ b/gui/src/pages/AddNewModel/configs/openRouterModel.ts @@ -0,0 +1,107 @@ +import { ModelPackage } from "./models"; +import openRouterModelsData from "./openRouterModels.json"; + +interface OpenRouterModel { + id: string; + canonical_slug: string; + hugging_face_id: string; + name: string; + created: number; + description: string; + context_length: number; + architecture: { + modality: string; + instruct_type: string | null; + [key: string]: any; + }; + pricing: { + prompt: string; + completion: string; + request?: string; + image?: string; + [key: string]: any; + }; + top_provider: { + max_completion_tokens?: number; + is_moderated: boolean; + [key: string]: any; + }; + per_request_limits: null | { [key: string]: any }; + supported_parameters: string[]; + default_parameters: null | { [key: string]: any }; +} + +/** + * Convert OpenRouter model data to ModelPackage format + */ +function convertOpenRouterModelToPackage(model: OpenRouterModel): 
+/**
+ * Generate ModelPackage objects from OpenRouter models JSON
+ */
+export function generateOpenRouterModels(): {
+  [key: string]: ModelPackage;
+} {
+  const models: { [key: string]: ModelPackage } = {};
+
+  const data = openRouterModelsData as { data: OpenRouterModel[] };
+
+  if (!data.data || !Array.isArray(data.data)) {
+    console.warn("Invalid OpenRouter models data structure");
+    return models;
+  }
+
+  data.data.forEach((model: OpenRouterModel) => {
+    if (!model.id || !model.name) {
+      console.warn("Skipping model with missing id or name", model);
+      return;
+    }
+
+    // Create a unique key from the model id (replace slashes and dots with underscores)
+    const key = model.id.replace(/[\/.]/g, "_");
+
+    try {
+      models[key] = convertOpenRouterModelToPackage(model);
+    } catch (error) {
+      console.error(`Failed to convert model ${model.id}:`, error);
+    }
+  });
+
+  return models;
+}
+
+/**
+ * Export all OpenRouter models as a pre-generated object
+ */
+export const openRouterModels = generateOpenRouterModels();
+
+/**
+ * Export OpenRouter models as an array for use in provider packages
+ */
+export const openRouterModelsList = Object.values(openRouterModels);
diff --git a/gui/src/pages/AddNewModel/configs/openRouterModels.json b/gui/src/pages/AddNewModel/configs/openRouterModels.json
new file mode 100644
index 00000000000..14662eb2db4
--- /dev/null
+++ b/gui/src/pages/AddNewModel/configs/openRouterModels.json
@@ -0,0 +1,15743 @@
+{
+  "data": [
+    {
+      "id": "openai/gpt-5.1",
+      "canonical_slug": "openai/gpt-5.1-20251113",
+      "hugging_face_id": "",
+      "name": "OpenAI: GPT-5.1",
+      "created": 1763060305,
+      "description": "GPT-5.1 is the latest frontier-grade model in the GPT-5 series, offering stronger general-purpose reasoning, improved instruction adherence, and a more natural conversational style compared to GPT-5. It uses adaptive reasoning to allocate computation dynamically, responding quickly to simple queries while spending more depth on complex tasks. The model produces clearer, more grounded explanations with reduced jargon, making it easier to follow even on technical or multi-step problems.\n\nBuilt for broad task coverage, GPT-5.1 delivers consistent gains across math, coding, and structured analysis workloads, with more coherent long-form answers and improved tool-use reliability. It also features refined conversational alignment, enabling warmer, more intuitive responses without compromising precision. 
GPT-5.1 serves as the primary full-capability successor to GPT-5", + "context_length": 400000, + "architecture": { + "modality": "text+image-\u003Etext", + "input_modalities": ["image", "text", "file"], + "output_modalities": ["text"], + "tokenizer": "GPT", + "instruct_type": null + }, + "pricing": { + "prompt": "0.00000125", + "completion": "0.00001", + "request": "0", + "image": "0", + "web_search": "0.01", + "internal_reasoning": "0", + "input_cache_read": "0.000000125" + }, + "top_provider": { + "context_length": 400000, + "max_completion_tokens": 128000, + "is_moderated": true + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "include_reasoning", + "logit_bias", + "logprobs", + "max_tokens", + "presence_penalty", + "reasoning", + "response_format", + "seed", + "stop", + "structured_outputs", + "tool_choice", + "tools", + "top_logprobs" + ], + "default_parameters": { + "temperature": null, + "top_p": null, + "frequency_penalty": null + } + }, + { + "id": "openai/gpt-5.1-chat", + "canonical_slug": "openai/gpt-5.1-chat-20251113", + "hugging_face_id": "", + "name": "OpenAI: GPT-5.1 Chat", + "created": 1763060302, + "description": "GPT-5.1 Chat (AKA Instant is the fast, lightweight member of the 5.1 family, optimized for low-latency chat while retaining strong general intelligence. It uses adaptive reasoning to selectively “think” on harder queries, improving accuracy on math, coding, and multi-step tasks without slowing down typical conversations. The model is warmer and more conversational by default, with better instruction following and more stable short-form reasoning. GPT-5.1 Chat is designed for high-throughput, interactive workloads where responsiveness and consistency matter more than deep deliberation.\n", + "context_length": 128000, + "architecture": { + "modality": "text+image-\u003Etext", + "input_modalities": ["file", "image", "text"], + "output_modalities": ["text"], + "tokenizer": "GPT", + "instruct_type": null + }, + "pricing": { + "prompt": "0.00000125", + "completion": "0.00001", + "request": "0", + "image": "0", + "web_search": "0.01", + "internal_reasoning": "0", + "input_cache_read": "0.000000125" + }, + "top_provider": { + "context_length": 128000, + "max_completion_tokens": 16384, + "is_moderated": true + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "logit_bias", + "logprobs", + "max_tokens", + "presence_penalty", + "response_format", + "seed", + "stop", + "structured_outputs", + "top_logprobs" + ], + "default_parameters": { + "temperature": null, + "top_p": null, + "frequency_penalty": null + } + }, + { + "id": "openai/gpt-5.1-codex", + "canonical_slug": "openai/gpt-5.1-codex-20251113", + "hugging_face_id": "", + "name": "OpenAI: GPT-5.1-Codex", + "created": 1763060298, + "description": "GPT-5.1-Codex is a specialized version of GPT-5.1 optimized for software engineering and coding workflows. It is designed for both interactive development sessions and long, independent execution of complex engineering tasks. The model supports building projects from scratch, feature development, debugging, large-scale refactoring, and code review. Compared to GPT-5.1, Codex is more steerable, adheres closely to developer instructions, and produces cleaner, higher-quality code outputs. Reasoning effort can be adjusted with the `reasoning.effort` parameter. 
Read the [docs here](https://openrouter.ai/docs/use-cases/reasoning-tokens#reasoning-effort-level)\n\nCodex integrates into developer environments including the CLI, IDE extensions, GitHub, and cloud tasks. It adapts reasoning effort dynamically—providing fast responses for small tasks while sustaining extended multi-hour runs for large projects. The model is trained to perform structured code reviews, catching critical flaws by reasoning over dependencies and validating behavior against tests. It also supports multimodal inputs such as images or screenshots for UI development and integrates tool use for search, dependency installation, and environment setup. Codex is intended specifically for agentic coding applications.", + "context_length": 400000, + "architecture": { + "modality": "text+image-\u003Etext", + "input_modalities": ["text", "image"], + "output_modalities": ["text"], + "tokenizer": "GPT", + "instruct_type": null + }, + "pricing": { + "prompt": "0.00000125", + "completion": "0.00001", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0", + "input_cache_read": "0.000000125" + }, + "top_provider": { + "context_length": 400000, + "max_completion_tokens": 128000, + "is_moderated": true + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "include_reasoning", + "logit_bias", + "logprobs", + "max_tokens", + "presence_penalty", + "reasoning", + "response_format", + "seed", + "stop", + "structured_outputs", + "tool_choice", + "tools", + "top_logprobs" + ], + "default_parameters": { + "temperature": null, + "top_p": null, + "frequency_penalty": null + } + }, + { + "id": "openai/gpt-5.1-codex-mini", + "canonical_slug": "openai/gpt-5.1-codex-mini-20251113", + "hugging_face_id": "", + "name": "OpenAI: GPT-5.1-Codex-Mini", + "created": 1763057820, + "description": "GPT-5.1-Codex-Mini is a smaller and faster version of GPT-5.1-Codex", + "context_length": 400000, + "architecture": { + "modality": "text+image-\u003Etext", + "input_modalities": ["image", "text"], + "output_modalities": ["text"], + "tokenizer": "GPT", + "instruct_type": null + }, + "pricing": { + "prompt": "0.00000025", + "completion": "0.000002", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0", + "input_cache_read": "0.000000025" + }, + "top_provider": { + "context_length": 400000, + "max_completion_tokens": 100000, + "is_moderated": true + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "include_reasoning", + "logit_bias", + "logprobs", + "max_tokens", + "presence_penalty", + "reasoning", + "response_format", + "seed", + "stop", + "structured_outputs", + "tool_choice", + "tools", + "top_logprobs" + ], + "default_parameters": { + "temperature": null, + "top_p": null, + "frequency_penalty": null + } + }, + { + "id": "kwaipilot/kat-coder-pro:free", + "canonical_slug": "kwaipilot/kat-coder-pro-v1", + "hugging_face_id": "", + "name": "Kwaipilot: KAT-Coder-Pro V1 (free)", + "created": 1762745912, + "description": "KAT-Coder-Pro V1 is KwaiKAT's most advanced agentic coding model in the KAT-Coder series. Designed specifically for agentic coding tasks, it excels in real-world software engineering scenarios, achieving 73.4% solve rate on the SWE-Bench Verified benchmark. 
\n\nThe model has been optimized for tool-use capability, multi-turn interaction, instruction following, generalization, and comprehensive capabilities through a multi-stage training process, including mid-training, supervised fine-tuning (SFT), reinforcement fine-tuning (RFT), and scalable agentic RL.", + "context_length": 256000, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Other", + "instruct_type": null + }, + "pricing": { + "prompt": "0", + "completion": "0", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 256000, + "max_completion_tokens": 32000, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "max_tokens", + "presence_penalty", + "repetition_penalty", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_k", + "top_p" + ], + "default_parameters": { + "temperature": null, + "top_p": null, + "frequency_penalty": null + } + }, + { + "id": "moonshotai/kimi-linear-48b-a3b-instruct", + "canonical_slug": "moonshotai/kimi-linear-48b-a3b-instruct-20251029", + "hugging_face_id": "moonshotai/Kimi-Linear-48B-A3B-Instruct", + "name": "MoonshotAI: Kimi Linear 48B A3B Instruct", + "created": 1762565833, + "description": "Kimi Linear is a hybrid linear attention architecture that outperforms traditional full attention methods across various contexts, including short, long, and reinforcement learning (RL) scaling regimes. At its core is Kimi Delta Attention (KDA)—a refined version of Gated DeltaNet that introduces a more efficient gating mechanism to optimize the use of finite-state RNN memory.\n\nKimi Linear achieves superior performance and hardware efficiency, especially for long-context tasks. It reduces the need for large KV caches by up to 75% and boosts decoding throughput by up to 6x for contexts as long as 1M tokens.", + "context_length": 1048576, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Other", + "instruct_type": null + }, + "pricing": { + "prompt": "0.0000003", + "completion": "0.0000006", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 1048576, + "max_completion_tokens": 1048576, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "logit_bias", + "logprobs", + "max_tokens", + "min_p", + "presence_penalty", + "repetition_penalty", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "top_k", + "top_logprobs", + "top_p" + ], + "default_parameters": { + "temperature": null, + "top_p": null, + "frequency_penalty": null + } + }, + { + "id": "moonshotai/kimi-k2-thinking", + "canonical_slug": "moonshotai/kimi-k2-thinking-20251106", + "hugging_face_id": "moonshotai/Kimi-K2-Thinking", + "name": "MoonshotAI: Kimi K2 Thinking", + "created": 1762440622, + "description": "Kimi K2 Thinking is Moonshot AI’s most advanced open reasoning model to date, extending the K2 series into agentic, long-horizon reasoning. Built on the trillion-parameter Mixture-of-Experts (MoE) architecture introduced in Kimi K2, it activates 32 billion parameters per forward pass and supports 256 k-token context windows. 
The model is optimized for persistent step-by-step thought, dynamic tool invocation, and complex reasoning workflows that span hundreds of turns. It interleaves step-by-step reasoning with tool use, enabling autonomous research, coding, and writing that can persist for hundreds of sequential actions without drift.\n\nIt sets new open-source benchmarks on HLE, BrowseComp, SWE-Multilingual, and LiveCodeBench, while maintaining stable multi-agent behavior through 200–300 tool calls. Built on a large-scale MoE architecture with MuonClip optimization, it combines strong reasoning depth with high inference efficiency for demanding agentic and analytical tasks.", + "context_length": 262144, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Other", + "instruct_type": null + }, + "pricing": { + "prompt": "0.00000055", + "completion": "0.00000225", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 262144, + "max_completion_tokens": 16384, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "include_reasoning", + "logit_bias", + "logprobs", + "max_tokens", + "min_p", + "presence_penalty", + "reasoning", + "repetition_penalty", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_k", + "top_logprobs", + "top_p" + ], + "default_parameters": { + "temperature": null, + "top_p": null, + "frequency_penalty": null + } + }, + { + "id": "amazon/nova-premier-v1", + "canonical_slug": "amazon/nova-premier-v1", + "hugging_face_id": "", + "name": "Amazon: Nova Premier 1.0", + "created": 1761950332, + "description": "Amazon Nova Premier is the most capable of Amazon’s multimodal models for complex reasoning tasks and for use as the best teacher for distilling custom models.", + "context_length": 1000000, + "architecture": { + "modality": "text+image-\u003Etext", + "input_modalities": ["text", "image"], + "output_modalities": ["text"], + "tokenizer": "Nova", + "instruct_type": null + }, + "pricing": { + "prompt": "0.0000025", + "completion": "0.0000125", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0", + "input_cache_read": "0.000000625" + }, + "top_provider": { + "context_length": 1000000, + "max_completion_tokens": 32000, + "is_moderated": true + }, + "per_request_limits": null, + "supported_parameters": [ + "max_tokens", + "stop", + "temperature", + "tools", + "top_k", + "top_p" + ], + "default_parameters": { + "temperature": null, + "top_p": null, + "frequency_penalty": null + } + }, + { + "id": "perplexity/sonar-pro-search", + "canonical_slug": "perplexity/sonar-pro-search", + "hugging_face_id": "", + "name": "Perplexity: Sonar Pro Search", + "created": 1761854366, + "description": "Exclusively available on the OpenRouter API, Sonar Pro's new Pro Search mode is Perplexity's most advanced agentic search system. It is designed for deeper reasoning and analysis. Pricing is based on tokens plus $18 per thousand requests. This model powers the Pro Search mode on the Perplexity platform.\n\nSonar Pro Search adds autonomous, multi-step reasoning to Sonar Pro. 
So, instead of just one query + synthesis, it plans and executes entire research workflows using tools.", + "context_length": 200000, + "architecture": { + "modality": "text+image-\u003Etext", + "input_modalities": ["text", "image"], + "output_modalities": ["text"], + "tokenizer": "Other", + "instruct_type": null + }, + "pricing": { + "prompt": "0.000003", + "completion": "0.000015", + "request": "0.018", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 200000, + "max_completion_tokens": 8000, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "include_reasoning", + "max_tokens", + "presence_penalty", + "reasoning", + "structured_outputs", + "temperature", + "top_k", + "top_p", + "web_search_options" + ], + "default_parameters": { + "temperature": null, + "top_p": null, + "frequency_penalty": null + } + }, + { + "id": "mistralai/voxtral-small-24b-2507", + "canonical_slug": "mistralai/voxtral-small-24b-2507", + "hugging_face_id": "mistralai/Voxtral-Small-24B-2507", + "name": "Mistral: Voxtral Small 24B 2507", + "created": 1761835144, + "description": "Voxtral Small is an enhancement of Mistral Small 3, incorporating state-of-the-art audio input capabilities while retaining best-in-class text performance. It excels at speech transcription, translation and audio understanding. Input audio is priced at $100 per million seconds.", + "context_length": 32000, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text", "audio"], + "output_modalities": ["text"], + "tokenizer": "Mistral", + "instruct_type": null + }, + "pricing": { + "prompt": "0.0000001", + "completion": "0.0000003", + "request": "0", + "image": "0", + "audio": "0.0001", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 32000, + "max_completion_tokens": null, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "max_tokens", + "presence_penalty", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_p" + ], + "default_parameters": { + "temperature": 0.2, + "top_p": 0.95, + "frequency_penalty": null + } + }, + { + "id": "openai/gpt-oss-safeguard-20b", + "canonical_slug": "openai/gpt-oss-safeguard-20b", + "hugging_face_id": "openai/gpt-oss-safeguard-20b", + "name": "OpenAI: gpt-oss-safeguard-20b", + "created": 1761752836, + "description": "gpt-oss-safeguard-20b is a safety reasoning model from OpenAI built upon gpt-oss-20b. 
This open-weight, 21B-parameter Mixture-of-Experts (MoE) model offers lower latency for safety tasks like content classification, LLM filtering, and trust & safety labeling.\n\nLearn more about this model in OpenAI's gpt-oss-safeguard [user guide](https://cookbook.openai.com/articles/gpt-oss-safeguard-guide).", + "context_length": 131072, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "GPT", + "instruct_type": null + }, + "pricing": { + "prompt": "0.000000075", + "completion": "0.0000003", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0", + "input_cache_read": "0.000000037" + }, + "top_provider": { + "context_length": 131072, + "max_completion_tokens": 65536, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "include_reasoning", + "max_tokens", + "reasoning", + "response_format", + "seed", + "stop", + "temperature", + "tool_choice", + "tools", + "top_p" + ], + "default_parameters": { + "temperature": null, + "top_p": null, + "frequency_penalty": null + } + }, + { + "id": "nvidia/nemotron-nano-12b-v2-vl:free", + "canonical_slug": "nvidia/nemotron-nano-12b-v2-vl", + "hugging_face_id": "nvidia/NVIDIA-Nemotron-Nano-12B-v2-VL-BF16", + "name": "NVIDIA: Nemotron Nano 12B 2 VL (free)", + "created": 1761675565, + "description": "NVIDIA Nemotron Nano 2 VL is a 12-billion-parameter open multimodal reasoning model designed for video understanding and document intelligence. It introduces a hybrid Transformer-Mamba architecture, combining transformer-level accuracy with Mamba’s memory-efficient sequence modeling for significantly higher throughput and lower latency.\n\nThe model supports inputs of text and multi-image documents, producing natural-language outputs. It is trained on high-quality NVIDIA-curated synthetic datasets optimized for optical-character recognition, chart reasoning, and multimodal comprehension.\n\nNemotron Nano 2 VL achieves leading results on OCRBench v2 and scores ≈ 74 average across MMMU, MathVista, AI2D, OCRBench, OCR-Reasoning, ChartQA, DocVQA, and Video-MME—surpassing prior open VL baselines. 
With Efficient Video Sampling (EVS), it handles long-form videos while reducing inference cost.\n\nOpen-weights, training data, and fine-tuning recipes are released under a permissive NVIDIA open license, with deployment supported across NeMo, NIM, and major inference runtimes.", + "context_length": 128000, + "architecture": { + "modality": "text+image-\u003Etext", + "input_modalities": ["image", "text", "video"], + "output_modalities": ["text"], + "tokenizer": "Other", + "instruct_type": null + }, + "pricing": { + "prompt": "0", + "completion": "0", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 128000, + "max_completion_tokens": 128000, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "include_reasoning", + "reasoning", + "tool_choice", + "tools" + ], + "default_parameters": { + "temperature": null, + "top_p": null, + "frequency_penalty": null + } + }, + { + "id": "nvidia/nemotron-nano-12b-v2-vl", + "canonical_slug": "nvidia/nemotron-nano-12b-v2-vl", + "hugging_face_id": "nvidia/NVIDIA-Nemotron-Nano-12B-v2-VL-BF16", + "name": "NVIDIA: Nemotron Nano 12B 2 VL", + "created": 1761675565, + "description": "NVIDIA Nemotron Nano 2 VL is a 12-billion-parameter open multimodal reasoning model designed for video understanding and document intelligence. It introduces a hybrid Transformer-Mamba architecture, combining transformer-level accuracy with Mamba’s memory-efficient sequence modeling for significantly higher throughput and lower latency.\n\nThe model supports inputs of text and multi-image documents, producing natural-language outputs. It is trained on high-quality NVIDIA-curated synthetic datasets optimized for optical-character recognition, chart reasoning, and multimodal comprehension.\n\nNemotron Nano 2 VL achieves leading results on OCRBench v2 and scores ≈ 74 average across MMMU, MathVista, AI2D, OCRBench, OCR-Reasoning, ChartQA, DocVQA, and Video-MME—surpassing prior open VL baselines. With Efficient Video Sampling (EVS), it handles long-form videos while reducing inference cost.\n\nOpen-weights, training data, and fine-tuning recipes are released under a permissive NVIDIA open license, with deployment supported across NeMo, NIM, and major inference runtimes.", + "context_length": 131072, + "architecture": { + "modality": "text+image-\u003Etext", + "input_modalities": ["image", "text", "video"], + "output_modalities": ["text"], + "tokenizer": "Other", + "instruct_type": null + }, + "pricing": { + "prompt": "0.0000002", + "completion": "0.0000006", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 131072, + "max_completion_tokens": null, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "include_reasoning", + "max_tokens", + "min_p", + "presence_penalty", + "reasoning", + "repetition_penalty", + "response_format", + "seed", + "stop", + "temperature", + "top_k", + "top_p" + ], + "default_parameters": { + "temperature": null, + "top_p": null, + "frequency_penalty": null + } + }, + { + "id": "minimax/minimax-m2", + "canonical_slug": "minimax/minimax-m2", + "hugging_face_id": "MiniMaxAI/MiniMax-M2", + "name": "MiniMax: MiniMax M2", + "created": 1761252093, + "description": "MiniMax-M2 is a compact, high-efficiency large language model optimized for end-to-end coding and agentic workflows. 
With 10 billion activated parameters (230 billion total), it delivers near-frontier intelligence across general reasoning, tool use, and multi-step task execution while maintaining low latency and deployment efficiency.\n\nThe model excels in code generation, multi-file editing, compile-run-fix loops, and test-validated repair, showing strong results on SWE-Bench Verified, Multi-SWE-Bench, and Terminal-Bench. It also performs competitively in agentic evaluations such as BrowseComp and GAIA, effectively handling long-horizon planning, retrieval, and recovery from execution errors.\n\nBenchmarked by [Artificial Analysis](https://artificialanalysis.ai/models/minimax-m2), MiniMax-M2 ranks among the top open-source models for composite intelligence, spanning mathematics, science, and instruction-following. Its small activation footprint enables fast inference, high concurrency, and improved unit economics, making it well-suited for large-scale agents, developer assistants, and reasoning-driven applications that require responsiveness and cost efficiency.\n\nTo avoid degrading this model's performance, MiniMax highly recommends preserving reasoning between turns. Learn more about using reasoning_details to pass back reasoning in our [docs](https://openrouter.ai/docs/use-cases/reasoning-tokens#preserving-reasoning-blocks).", + "context_length": 204800, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Other", + "instruct_type": null + }, + "pricing": { + "prompt": "0.000000255", + "completion": "0.00000102", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 204800, + "max_completion_tokens": 131072, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "include_reasoning", + "logit_bias", + "logprobs", + "max_tokens", + "min_p", + "presence_penalty", + "reasoning", + "repetition_penalty", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_k", + "top_logprobs", + "top_p" + ], + "default_parameters": { + "temperature": 1, + "top_p": 0.95, + "frequency_penalty": null + } + }, + { + "id": "liquid/lfm2-8b-a1b", + "canonical_slug": "liquid/lfm2-8b-a1b", + "hugging_face_id": "LiquidAI/LFM2-8B-A1B", + "name": "LiquidAI/LFM2-8B-A1B", + "created": 1760970984, + "description": "Model created via inbox interface", + "context_length": 32768, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Other", + "instruct_type": null + }, + "pricing": { + "prompt": "0.00000005", + "completion": "0.0000001", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 32768, + "max_completion_tokens": null, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "max_tokens", + "min_p", + "presence_penalty", + "repetition_penalty", + "seed", + "stop", + "temperature", + "top_k", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "liquid/lfm-2.2-6b", + "canonical_slug": "liquid/lfm-2.2-6b", + "hugging_face_id": "LiquidAI/LFM2-2.6B", + "name": "LiquidAI/LFM2-2.6B", + "created": 1760970889, + "description": "LFM2 is a new generation of hybrid models developed by Liquid AI, specifically designed for edge AI and on-device deployment. 
It sets a new standard in terms of quality, speed, and memory efficiency.", + "context_length": 32768, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Other", + "instruct_type": null + }, + "pricing": { + "prompt": "0.00000005", + "completion": "0.0000001", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 32768, + "max_completion_tokens": null, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "max_tokens", + "min_p", + "presence_penalty", + "repetition_penalty", + "seed", + "stop", + "temperature", + "top_k", + "top_p" + ], + "default_parameters": { + "temperature": null, + "top_p": null, + "frequency_penalty": null + } + }, + { + "id": "ibm-granite/granite-4.0-h-micro", + "canonical_slug": "ibm-granite/granite-4.0-h-micro", + "hugging_face_id": "ibm-granite/granite-4.0-h-micro", + "name": "IBM: Granite 4.0 Micro", + "created": 1760927695, + "description": "Granite-4.0-H-Micro is a 3B parameter from the Granite 4 family of models. These models are the latest in a series of models released by IBM. They are fine-tuned for long context tool calling. ", + "context_length": 131000, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Other", + "instruct_type": null + }, + "pricing": { + "prompt": "0.000000017", + "completion": "0.00000011", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 131000, + "max_completion_tokens": null, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "max_tokens", + "presence_penalty", + "repetition_penalty", + "seed", + "temperature", + "top_k", + "top_p" + ], + "default_parameters": { + "temperature": null, + "top_p": null, + "frequency_penalty": null + } + }, + { + "id": "deepcogito/cogito-v2-preview-llama-405b", + "canonical_slug": "deepcogito/cogito-v2-preview-llama-405b", + "hugging_face_id": "deepcogito/cogito-v2-preview-llama-405B", + "name": "Deep Cogito: Cogito V2 Preview Llama 405B", + "created": 1760709933, + "description": "Cogito v2 405B is a dense hybrid reasoning model that combines direct answering capabilities with advanced self-reflection. It represents a significant step toward frontier intelligence with dense architecture delivering performance competitive with leading closed models. 
This advanced reasoning system combines policy improvement with massive scale for exceptional capabilities.\n", + "context_length": 32768, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Llama3", + "instruct_type": null + }, + "pricing": { + "prompt": "0.0000035", + "completion": "0.0000035", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 32768, + "max_completion_tokens": null, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "include_reasoning", + "logit_bias", + "max_tokens", + "min_p", + "presence_penalty", + "reasoning", + "repetition_penalty", + "response_format", + "stop", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_k", + "top_p" + ], + "default_parameters": { + "temperature": null, + "top_p": null, + "frequency_penalty": null + } + }, + { + "id": "openai/gpt-5-image-mini", + "canonical_slug": "openai/gpt-5-image-mini", + "hugging_face_id": "", + "name": "OpenAI: GPT-5 Image Mini", + "created": 1760624583, + "description": "GPT-5 Image Mini combines OpenAI's advanced language capabilities, powered by [GPT-5 Mini](https://openrouter.ai/openai/gpt-5-mini), with GPT Image 1 Mini for efficient image generation. This natively multimodal model features superior instruction following, text rendering, and detailed image editing with reduced latency and cost. It excels at high-quality visual creation while maintaining strong text understanding, making it ideal for applications that require both efficient image generation and text processing at scale.", + "context_length": 400000, + "architecture": { + "modality": "text+image-\u003Etext+image", + "input_modalities": ["file", "image", "text"], + "output_modalities": ["image", "text"], + "tokenizer": "GPT", + "instruct_type": null + }, + "pricing": { + "prompt": "0.0000025", + "completion": "0.000002", + "request": "0", + "image": "0.0000025", + "web_search": "0.01", + "internal_reasoning": "0", + "input_cache_read": "0.00000025" + }, + "top_provider": { + "context_length": 400000, + "max_completion_tokens": 128000, + "is_moderated": true + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "include_reasoning", + "logit_bias", + "logprobs", + "max_tokens", + "presence_penalty", + "reasoning", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_logprobs", + "top_p" + ], + "default_parameters": { + "temperature": null, + "top_p": null, + "frequency_penalty": null + } + }, + { + "id": "anthropic/claude-haiku-4.5", + "canonical_slug": "anthropic/claude-4.5-haiku-20251001", + "hugging_face_id": "", + "name": "Anthropic: Claude Haiku 4.5", + "created": 1760547638, + "description": "Claude Haiku 4.5 is Anthropic’s fastest and most efficient model, delivering near-frontier intelligence at a fraction of the cost and latency of larger Claude models. Matching Claude Sonnet 4’s performance across reasoning, coding, and computer-use tasks, Haiku 4.5 brings frontier-level capability to real-time and high-volume applications.\n\nIt introduces extended thinking to the Haiku line; enabling controllable reasoning depth, summarized or interleaved thought output, and tool-assisted workflows with full support for coding, bash, web search, and computer-use tools. 
Scoring \u003E73% on SWE-bench Verified, Haiku 4.5 ranks among the world’s best coding models while maintaining exceptional responsiveness for sub-agents, parallelized execution, and scaled deployment.", + "context_length": 200000, + "architecture": { + "modality": "text+image-\u003Etext", + "input_modalities": ["image", "text"], + "output_modalities": ["text"], + "tokenizer": "Claude", + "instruct_type": null + }, + "pricing": { + "prompt": "0.000001", + "completion": "0.000005", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0", + "input_cache_read": "0.0000001", + "input_cache_write": "0.00000125" + }, + "top_provider": { + "context_length": 200000, + "max_completion_tokens": 64000, + "is_moderated": true + }, + "per_request_limits": null, + "supported_parameters": [ + "include_reasoning", + "max_tokens", + "reasoning", + "stop", + "temperature", + "tool_choice", + "tools", + "top_k", + "top_p" + ], + "default_parameters": { + "temperature": null, + "top_p": null, + "frequency_penalty": null + } + }, + { + "id": "qwen/qwen3-vl-8b-thinking", + "canonical_slug": "qwen/qwen3-vl-8b-thinking", + "hugging_face_id": "Qwen/Qwen3-VL-8B-Thinking", + "name": "Qwen: Qwen3 VL 8B Thinking", + "created": 1760463746, + "description": "Qwen3-VL-8B-Thinking is the reasoning-optimized variant of the Qwen3-VL-8B multimodal model, designed for advanced visual and textual reasoning across complex scenes, documents, and temporal sequences. It integrates enhanced multimodal alignment and long-context processing (native 256K, expandable to 1M tokens) for tasks such as scientific visual analysis, causal inference, and mathematical reasoning over image or video inputs.\n\nCompared to the Instruct edition, the Thinking version introduces deeper visual-language fusion and deliberate reasoning pathways that improve performance on long-chain logic tasks, STEM problem-solving, and multi-step video understanding. It achieves stronger temporal grounding via Interleaved-MRoPE and timestamp-aware embeddings, while maintaining robust OCR, multilingual comprehension, and text generation on par with large text-only LLMs.", + "context_length": 256000, + "architecture": { + "modality": "text+image-\u003Etext", + "input_modalities": ["image", "text"], + "output_modalities": ["text"], + "tokenizer": "Qwen3", + "instruct_type": null + }, + "pricing": { + "prompt": "0.00000018", + "completion": "0.0000021", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 256000, + "max_completion_tokens": 32768, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "include_reasoning", + "max_tokens", + "presence_penalty", + "reasoning", + "response_format", + "seed", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_p" + ], + "default_parameters": { + "temperature": 1, + "top_p": 0.95 + } + }, + { + "id": "qwen/qwen3-vl-8b-instruct", + "canonical_slug": "qwen/qwen3-vl-8b-instruct", + "hugging_face_id": "Qwen/Qwen3-VL-8B-Instruct", + "name": "Qwen: Qwen3 VL 8B Instruct", + "created": 1760463308, + "description": "Qwen3-VL-8B-Instruct is a multimodal vision-language model from the Qwen3-VL series, built for high-fidelity understanding and reasoning across text, images, and video. 
It features improved multimodal fusion with Interleaved-MRoPE for long-horizon temporal reasoning, DeepStack for fine-grained visual-text alignment, and text-timestamp alignment for precise event localization.\n\nThe model supports a native 256K-token context window, extensible to 1M tokens, and handles both static and dynamic media inputs for tasks like document parsing, visual question answering, spatial reasoning, and GUI control. It achieves text understanding comparable to leading LLMs while expanding OCR coverage to 32 languages and enhancing robustness under varied visual conditions.", + "context_length": 131072, + "architecture": { + "modality": "text+image-\u003Etext", + "input_modalities": ["image", "text"], + "output_modalities": ["text"], + "tokenizer": "Qwen3", + "instruct_type": null + }, + "pricing": { + "prompt": "0.00000008", + "completion": "0.0000005", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0", + "input_cache_read": "0" + }, + "top_provider": { + "context_length": 131072, + "max_completion_tokens": 32768, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "logit_bias", + "max_tokens", + "min_p", + "presence_penalty", + "repetition_penalty", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_k", + "top_p" + ], + "default_parameters": { + "temperature": 0.7, + "top_p": 0.8, + "frequency_penalty": null + } + }, + { + "id": "openai/gpt-5-image", + "canonical_slug": "openai/gpt-5-image", + "hugging_face_id": "", + "name": "OpenAI: GPT-5 Image", + "created": 1760447986, + "description": "[GPT-5](https://openrouter.ai/openai/gpt-5) Image combines OpenAI's most advanced language model with state-of-the-art image generation capabilities. It offers major improvements in reasoning, code quality, and user experience while incorporating GPT Image 1's superior instruction following, text rendering, and detailed image editing.", + "context_length": 400000, + "architecture": { + "modality": "text+image-\u003Etext+image", + "input_modalities": ["image", "text", "file"], + "output_modalities": ["image", "text"], + "tokenizer": "GPT", + "instruct_type": null + }, + "pricing": { + "prompt": "0.00001", + "completion": "0.00001", + "request": "0", + "image": "0.00001", + "web_search": "0.01", + "internal_reasoning": "0", + "input_cache_read": "0.00000125" + }, + "top_provider": { + "context_length": 400000, + "max_completion_tokens": 128000, + "is_moderated": true + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "include_reasoning", + "logit_bias", + "logprobs", + "max_tokens", + "presence_penalty", + "reasoning", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_logprobs", + "top_p" + ], + "default_parameters": { + "temperature": null, + "top_p": null, + "frequency_penalty": null + } + }, + { + "id": "inclusionai/ring-1t", + "canonical_slug": "inclusionai/ring-1t", + "hugging_face_id": "inclusionAI/Ring-1T", + "name": "inclusionAI: Ring 1T", + "created": 1760384099, + "description": "Ring-1T has undergone continued scaling with large-scale verifiable reward reinforcement learning (RLVR) training, further unlocking the natural language reasoning capabilities of the trillion-parameter foundation model. 
Through RLHF training, the model's general abilities have also been refined, making this release of Ring-1T more balanced in performance across various tasks.\n\nRing-1T adopts the Ling 2.0 architecture and is trained on the Ling-1T-base foundation model, which contains 1 trillion total parameters with 50 billion activated parameters, supporting a context window of up to 128K tokens.", + "context_length": 131072, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Other", + "instruct_type": null + }, + "pricing": { + "prompt": "0.00000057", + "completion": "0.00000228", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 131072, + "max_completion_tokens": 131072, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "include_reasoning", + "reasoning", + "response_format", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_k", + "top_p" + ], + "default_parameters": { + "temperature": null, + "top_p": null, + "frequency_penalty": null + } + }, + { + "id": "inclusionai/ling-1t", + "canonical_slug": "inclusionai/ling-1t", + "hugging_face_id": "inclusionAI/Ling-1T", + "name": "inclusionAI: Ling-1T", + "created": 1760316076, + "description": "Ling-1T is a trillion-parameter open-weight large language model developed by inclusionAI and released under the MIT license. It represents the first flagship non-thinking model in the Ling 2.0 series, built around a sparse-activation architecture with roughly 50 billion active parameters per token. The model supports up to 128 K tokens of context and emphasizes efficient reasoning through an “Evolutionary Chain-of-Thought (Evo-CoT)” training strategy.\n\nPre-trained on more than 20 trillion reasoning-dense tokens, Ling-1T achieves strong results across code generation, mathematics, and logical reasoning benchmarks while maintaining high inference efficiency. It employs FP8 mixed-precision training, MoE routing with QK normalization, and MTP layers for compositional reasoning stability. 
The model also introduces LPO (Linguistics-unit Policy Optimization) for post-training alignment, enhancing sentence-level semantic control.\n\nLing-1T can perform complex text generation, multilingual reasoning, and front-end code synthesis with a focus on both functionality and aesthetics.", + "context_length": 131072, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Other", + "instruct_type": null + }, + "pricing": { + "prompt": "0.00000057", + "completion": "0.00000228", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 131072, + "max_completion_tokens": 131072, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "response_format", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_k", + "top_p" + ], + "default_parameters": { + "temperature": null, + "top_p": null, + "frequency_penalty": null + } + }, + { + "id": "openai/o3-deep-research", + "canonical_slug": "openai/o3-deep-research-2025-06-26", + "hugging_face_id": "", + "name": "OpenAI: o3 Deep Research", + "created": 1760129661, + "description": "o3-deep-research is OpenAI's advanced model for deep research, designed to tackle complex, multi-step research tasks.\n\nNote: This model always uses the 'web_search' tool which adds additional cost.", + "context_length": 200000, + "architecture": { + "modality": "text+image-\u003Etext", + "input_modalities": ["image", "text", "file"], + "output_modalities": ["text"], + "tokenizer": "GPT", + "instruct_type": null + }, + "pricing": { + "prompt": "0.00001", + "completion": "0.00004", + "request": "0", + "image": "0.00765", + "web_search": "0.01", + "internal_reasoning": "0", + "input_cache_read": "0.0000025" + }, + "top_provider": { + "context_length": 200000, + "max_completion_tokens": 100000, + "is_moderated": true + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "include_reasoning", + "logit_bias", + "logprobs", + "max_tokens", + "presence_penalty", + "reasoning", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_logprobs", + "top_p" + ], + "default_parameters": { + "temperature": null, + "top_p": null, + "frequency_penalty": null + } + }, + { + "id": "openai/o4-mini-deep-research", + "canonical_slug": "openai/o4-mini-deep-research-2025-06-26", + "hugging_face_id": "", + "name": "OpenAI: o4 Mini Deep Research", + "created": 1760129642, + "description": "o4-mini-deep-research is OpenAI's faster, more affordable deep research model—ideal for tackling complex, multi-step research tasks.\n\nNote: This model always uses the 'web_search' tool which adds additional cost.", + "context_length": 200000, + "architecture": { + "modality": "text+image-\u003Etext", + "input_modalities": ["file", "image", "text"], + "output_modalities": ["text"], + "tokenizer": "GPT", + "instruct_type": null + }, + "pricing": { + "prompt": "0.000002", + "completion": "0.000008", + "request": "0", + "image": "0.00153", + "web_search": "0.01", + "internal_reasoning": "0", + "input_cache_read": "0.0000005" + }, + "top_provider": { + "context_length": 200000, + "max_completion_tokens": 100000, + "is_moderated": true + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "include_reasoning", + "logit_bias", + "logprobs", + "max_tokens", + 
"presence_penalty", + "reasoning", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_logprobs", + "top_p" + ], + "default_parameters": { + "temperature": null, + "top_p": null, + "frequency_penalty": null + } + }, + { + "id": "nvidia/llama-3.3-nemotron-super-49b-v1.5", + "canonical_slug": "nvidia/llama-3.3-nemotron-super-49b-v1.5", + "hugging_face_id": "nvidia/Llama-3_3-Nemotron-Super-49B-v1_5", + "name": "NVIDIA: Llama 3.3 Nemotron Super 49B V1.5", + "created": 1760101395, + "description": "Llama-3.3-Nemotron-Super-49B-v1.5 is a 49B-parameter, English-centric reasoning/chat model derived from Meta’s Llama-3.3-70B-Instruct with a 128K context. It’s post-trained for agentic workflows (RAG, tool calling) via SFT across math, code, science, and multi-turn chat, followed by multiple RL stages; Reward-aware Preference Optimization (RPO) for alignment, RL with Verifiable Rewards (RLVR) for step-wise reasoning, and iterative DPO to refine tool-use behavior. A distillation-driven Neural Architecture Search (“Puzzle”) replaces some attention blocks and varies FFN widths to shrink memory footprint and improve throughput, enabling single-GPU (H100/H200) deployment while preserving instruction following and CoT quality.\n\nIn internal evaluations (NeMo-Skills, up to 16 runs, temp = 0.6, top_p = 0.95), the model reports strong reasoning/coding results, e.g., MATH500 pass@1 = 97.4, AIME-2024 = 87.5, AIME-2025 = 82.71, GPQA = 71.97, LiveCodeBench (24.10–25.02) = 73.58, and MMLU-Pro (CoT) = 79.53. The model targets practical inference efficiency (high tokens/s, reduced VRAM) with Transformers/vLLM support and explicit “reasoning on/off” modes (chat-first defaults, greedy recommended when disabled). 
Suitable for building agents, assistants, and long-context retrieval systems where balanced accuracy-to-cost and reliable tool use matter.\n", + "context_length": 131072, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Llama3", + "instruct_type": null + }, + "pricing": { + "prompt": "0.0000001", + "completion": "0.0000004", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 131072, + "max_completion_tokens": null, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "include_reasoning", + "max_tokens", + "min_p", + "presence_penalty", + "reasoning", + "repetition_penalty", + "response_format", + "seed", + "stop", + "temperature", + "tool_choice", + "tools", + "top_k", + "top_p" + ], + "default_parameters": null + }, + { + "id": "baidu/ernie-4.5-21b-a3b-thinking", + "canonical_slug": "baidu/ernie-4.5-21b-a3b-thinking", + "hugging_face_id": "baidu/ERNIE-4.5-21B-A3B-Thinking", + "name": "Baidu: ERNIE 4.5 21B A3B Thinking", + "created": 1760048887, + "description": "ERNIE-4.5-21B-A3B-Thinking is Baidu's upgraded lightweight MoE model, refined to boost reasoning depth and quality for top-tier performance in logical puzzles, math, science, coding, text generation, and expert-level academic benchmarks.", + "context_length": 131072, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Other", + "instruct_type": null + }, + "pricing": { + "prompt": "0.00000007", + "completion": "0.00000028", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 131072, + "max_completion_tokens": 65536, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "include_reasoning", + "max_tokens", + "presence_penalty", + "reasoning", + "repetition_penalty", + "seed", + "stop", + "temperature", + "top_k", + "top_p" + ], + "default_parameters": { + "temperature": 0.6, + "top_p": 0.95, + "frequency_penalty": null + } + }, + { + "id": "google/gemini-2.5-flash-image", + "canonical_slug": "google/gemini-2.5-flash-image", + "hugging_face_id": "", + "name": "Google: Gemini 2.5 Flash Image (Nano Banana)", + "created": 1759870431, + "description": "Gemini 2.5 Flash Image, a.k.a. \"Nano Banana,\" is now generally available. It is a state of the art image generation model with contextual understanding. It is capable of image generation, edits, and multi-turn conversations. 
Aspect ratios can be controlled with the [image_config API Parameter](https://openrouter.ai/docs/features/multimodal/image-generation#image-aspect-ratio-configuration)", + "context_length": 32768, + "architecture": { + "modality": "text+image-\u003Etext+image", + "input_modalities": ["image", "text"], + "output_modalities": ["image", "text"], + "tokenizer": "Gemini", + "instruct_type": null + }, + "pricing": { + "prompt": "0.0000003", + "completion": "0.0000025", + "request": "0", + "image": "0.001238", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 32768, + "max_completion_tokens": 32768, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "max_tokens", + "response_format", + "seed", + "structured_outputs", + "temperature", + "top_p" + ], + "default_parameters": { + "temperature": null, + "top_p": null, + "frequency_penalty": null + } + }, + { + "id": "qwen/qwen3-vl-30b-a3b-thinking", + "canonical_slug": "qwen/qwen3-vl-30b-a3b-thinking", + "hugging_face_id": "Qwen/Qwen3-VL-30B-A3B-Thinking", + "name": "Qwen: Qwen3 VL 30B A3B Thinking", + "created": 1759794479, + "description": "Qwen3-VL-30B-A3B-Thinking is a multimodal model that unifies strong text generation with visual understanding for images and videos. Its Thinking variant enhances reasoning in STEM, math, and complex tasks. It excels in perception of real-world/synthetic categories, 2D/3D spatial grounding, and long-form visual comprehension, achieving competitive multimodal benchmark results. For agentic use, it handles multi-image multi-turn instructions, video timeline alignments, GUI automation, and visual coding from sketches to debugged UI. Text performance matches flagship Qwen3 models, suiting document AI, OCR, UI assistance, spatial tasks, and agent research.", + "context_length": 131072, + "architecture": { + "modality": "text+image-\u003Etext", + "input_modalities": ["text", "image"], + "output_modalities": ["text"], + "tokenizer": "Qwen3", + "instruct_type": null + }, + "pricing": { + "prompt": "0.0000002", + "completion": "0.000001", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0", + "input_cache_read": "0" + }, + "top_provider": { + "context_length": 131072, + "max_completion_tokens": 32768, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "include_reasoning", + "max_tokens", + "presence_penalty", + "reasoning", + "repetition_penalty", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_k", + "top_p" + ], + "default_parameters": { + "temperature": 0.8, + "top_p": 0.95 + } + }, + { + "id": "qwen/qwen3-vl-30b-a3b-instruct", + "canonical_slug": "qwen/qwen3-vl-30b-a3b-instruct", + "hugging_face_id": "Qwen/Qwen3-VL-30B-A3B-Instruct", + "name": "Qwen: Qwen3 VL 30B A3B Instruct", + "created": 1759794476, + "description": "Qwen3-VL-30B-A3B-Instruct is a multimodal model that unifies strong text generation with visual understanding for images and videos. Its Instruct variant optimizes instruction-following for general multimodal tasks. It excels in perception of real-world/synthetic categories, 2D/3D spatial grounding, and long-form visual comprehension, achieving competitive multimodal benchmark results. For agentic use, it handles multi-image multi-turn instructions, video timeline alignments, GUI automation, and visual coding from sketches to debugged UI. 
Text performance matches flagship Qwen3 models, suiting document AI, OCR, UI assistance, spatial tasks, and agent research.", + "context_length": 262144, + "architecture": { + "modality": "text+image-\u003Etext", + "input_modalities": ["text", "image"], + "output_modalities": ["text"], + "tokenizer": "Qwen3", + "instruct_type": null + }, + "pricing": { + "prompt": "0.00000015", + "completion": "0.0000006", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 262144, + "max_completion_tokens": null, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "logit_bias", + "logprobs", + "max_tokens", + "min_p", + "presence_penalty", + "repetition_penalty", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_k", + "top_logprobs", + "top_p" + ], + "default_parameters": { + "temperature": 0.7, + "top_p": 0.8, + "frequency_penalty": null + } + }, + { + "id": "openai/gpt-5-pro", + "canonical_slug": "openai/gpt-5-pro-2025-10-06", + "hugging_face_id": "", + "name": "OpenAI: GPT-5 Pro", + "created": 1759776663, + "description": "GPT-5 Pro is OpenAI’s most advanced model, offering major improvements in reasoning, code quality, and user experience. It is optimized for complex tasks that require step-by-step reasoning, instruction following, and accuracy in high-stakes use cases. It supports test-time routing features and advanced prompt understanding, including user-specified intent like \"think hard about this.\" Improvements include reductions in hallucination and sycophancy, and better performance in coding, writing, and health-related tasks.", + "context_length": 400000, + "architecture": { + "modality": "text+image-\u003Etext", + "input_modalities": ["image", "text", "file"], + "output_modalities": ["text"], + "tokenizer": "GPT", + "instruct_type": null + }, + "pricing": { + "prompt": "0.000015", + "completion": "0.00012", + "request": "0", + "image": "0", + "web_search": "0.01", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 400000, + "max_completion_tokens": 128000, + "is_moderated": true + }, + "per_request_limits": null, + "supported_parameters": [ + "include_reasoning", + "max_tokens", + "reasoning", + "response_format", + "seed", + "structured_outputs", + "tool_choice", + "tools" + ], + "default_parameters": { + "temperature": null, + "top_p": null, + "frequency_penalty": null + } + }, + { + "id": "z-ai/glm-4.6", + "canonical_slug": "z-ai/glm-4.6", + "hugging_face_id": "", + "name": "Z.AI: GLM 4.6", + "created": 1759235576, + "description": "Compared with GLM-4.5, this generation brings several key improvements:\n\nLonger context window: The context window has been expanded from 128K to 200K tokens, enabling the model to handle more complex agentic tasks.\nSuperior coding performance: The model achieves higher scores on code benchmarks and demonstrates better real-world performance in applications such as Claude Code, Cline, Roo Code, and Kilo Code, including improvements in generating visually polished front-end pages.\nAdvanced reasoning: GLM-4.6 shows a clear improvement in reasoning performance and supports tool use during inference, leading to stronger overall capability.\nMore capable agents: GLM-4.6 exhibits stronger performance in tool-using and search-based agents, and integrates more effectively within agent frameworks.\nRefined writing: Better aligns with human
preferences in style and readability, and performs more naturally in role-playing scenarios.", + "context_length": 202752, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Other", + "instruct_type": null + }, + "pricing": { + "prompt": "0.0000004", + "completion": "0.00000175", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 202752, + "max_completion_tokens": 202752, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "include_reasoning", + "logit_bias", + "logprobs", + "max_tokens", + "min_p", + "presence_penalty", + "reasoning", + "repetition_penalty", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_a", + "top_k", + "top_logprobs", + "top_p" + ], + "default_parameters": { + "temperature": 0.6, + "top_p": null, + "frequency_penalty": null + } + }, + { + "id": "z-ai/glm-4.6:exacto", + "canonical_slug": "z-ai/glm-4.6", + "hugging_face_id": "", + "name": "Z.AI: GLM 4.6 (exacto)", + "created": 1759235576, + "description": "Compared with GLM-4.5, this generation brings several key improvements:\n\nLonger context window: The context window has been expanded from 128K to 200K tokens, enabling the model to handle more complex agentic tasks.\nSuperior coding performance: The model achieves higher scores on code benchmarks and demonstrates better real-world performance in applications such as Claude Code, Cline, Roo Code, and Kilo Code, including improvements in generating visually polished front-end pages.\nAdvanced reasoning: GLM-4.6 shows a clear improvement in reasoning performance and supports tool use during inference, leading to stronger overall capability.\nMore capable agents: GLM-4.6 exhibits stronger performance in tool-using and search-based agents, and integrates more effectively within agent frameworks.\nRefined writing: Better aligns with human preferences in style and readability, and performs more naturally in role-playing scenarios.", + "context_length": 202752, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Other", + "instruct_type": null + }, + "pricing": { + "prompt": "0.00000045", + "completion": "0.0000019", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 202752, + "max_completion_tokens": null, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "include_reasoning", + "max_tokens", + "min_p", + "presence_penalty", + "reasoning", + "repetition_penalty", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_k", + "top_p" + ], + "default_parameters": { + "temperature": 0.6, + "top_p": null, + "frequency_penalty": null + } + }, + { + "id": "anthropic/claude-sonnet-4.5", + "canonical_slug": "anthropic/claude-4.5-sonnet-20250929", + "hugging_face_id": "", + "name": "Anthropic: Claude Sonnet 4.5", + "created": 1759161676, + "description": "Claude Sonnet 4.5 is Anthropic’s most advanced Sonnet model to date, optimized for real-world agents and coding workflows.
It delivers state-of-the-art performance on coding benchmarks such as SWE-bench Verified, with improvements across system design, code security, and specification adherence. The model is designed for extended autonomous operation, maintaining task continuity across sessions and providing fact-based progress tracking.\n\nSonnet 4.5 also introduces stronger agentic capabilities, including improved tool orchestration, speculative parallel execution, and more efficient context and memory management. With enhanced context tracking and awareness of token usage across tool calls, it is particularly well-suited for multi-context and long-running workflows. Use cases span software engineering, cybersecurity, financial analysis, research agents, and other domains requiring sustained reasoning and tool use.", + "context_length": 1000000, + "architecture": { + "modality": "text+image-\u003Etext", + "input_modalities": ["text", "image", "file"], + "output_modalities": ["text"], + "tokenizer": "Claude", + "instruct_type": null + }, + "pricing": { + "prompt": "0.000003", + "completion": "0.000015", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0", + "input_cache_read": "0.0000003", + "input_cache_write": "0.00000375" + }, + "top_provider": { + "context_length": 1000000, + "max_completion_tokens": 64000, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "include_reasoning", + "max_tokens", + "reasoning", + "response_format", + "stop", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_k", + "top_p" + ], + "default_parameters": { + "temperature": 1, + "top_p": 1, + "frequency_penalty": null + } + }, + { + "id": "deepseek/deepseek-v3.2-exp", + "canonical_slug": "deepseek/deepseek-v3.2-exp", + "hugging_face_id": "deepseek-ai/DeepSeek-V3.2-Exp", + "name": "DeepSeek: DeepSeek V3.2 Exp", + "created": 1759150481, + "description": "DeepSeek-V3.2-Exp is an experimental large language model released by DeepSeek as an intermediate step between V3.1 and future architectures. It introduces DeepSeek Sparse Attention (DSA), a fine-grained sparse attention mechanism designed to improve training and inference efficiency in long-context scenarios while maintaining output quality. Users can control the reasoning behaviour with the `reasoning` `enabled` boolean. [Learn more in our docs](https://openrouter.ai/docs/use-cases/reasoning-tokens#enable-reasoning-with-default-config)\n\nThe model was trained under conditions aligned with V3.1-Terminus to enable direct comparison. Benchmarking shows performance roughly on par with V3.1 across reasoning, coding, and agentic tool-use tasks, with minor tradeoffs and gains depending on the domain. 
This release focuses on validating architectural optimizations for extended context lengths rather than advancing raw task accuracy, making it primarily a research-oriented model for exploring efficient transformer designs.", + "context_length": 163840, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "DeepSeek", + "instruct_type": "deepseek-v3.1" + }, + "pricing": { + "prompt": "0.00000027", + "completion": "0.0000004", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 163840, + "max_completion_tokens": null, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "include_reasoning", + "logprobs", + "max_tokens", + "min_p", + "presence_penalty", + "reasoning", + "repetition_penalty", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_k", + "top_logprobs", + "top_p" + ], + "default_parameters": { + "temperature": 0.6, + "top_p": 0.95, + "frequency_penalty": null + } + }, + { + "id": "thedrummer/cydonia-24b-v4.1", + "canonical_slug": "thedrummer/cydonia-24b-v4.1", + "hugging_face_id": "thedrummer/cydonia-24b-v4.1", + "name": "TheDrummer: Cydonia 24B V4.1", + "created": 1758931878, + "description": "Uncensored and creative writing model based on Mistral Small 3.2 24B with good recall, prompt adherence, and intelligence.", + "context_length": 131072, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Other", + "instruct_type": null + }, + "pricing": { + "prompt": "0.0000003", + "completion": "0.0000005", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 131072, + "max_completion_tokens": 131072, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "logit_bias", + "max_tokens", + "min_p", + "presence_penalty", + "repetition_penalty", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "top_k", + "top_p" + ], + "default_parameters": { + "temperature": null, + "top_p": null, + "frequency_penalty": null + } + }, + { + "id": "relace/relace-apply-3", + "canonical_slug": "relace/relace-apply-3", + "hugging_face_id": "", + "name": "Relace: Relace Apply 3", + "created": 1758891572, + "description": "Relace Apply 3 is a specialized code-patching LLM that merges AI-suggested edits straight into your source files. It can apply updates from GPT-4o, Claude, and others into your files at 10,000 tokens/sec on average.\n\nThe model requires the prompt to be in the following format: \n\u003Cinstruction\u003E{instruction}\u003C/instruction\u003E\n\u003Ccode\u003E{initial_code}\u003C/code\u003E\n\u003Cupdate\u003E{edit_snippet}\u003C/update\u003E\n\nZero Data Retention is enabled for Relace. 
Learn more about this model in their [documentation](https://docs.relace.ai/api-reference/instant-apply/apply)", + "context_length": 256000, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Other", + "instruct_type": null + }, + "pricing": { + "prompt": "0.00000085", + "completion": "0.00000125", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 256000, + "max_completion_tokens": 128000, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": ["max_tokens", "seed", "stop"], + "default_parameters": { + "temperature": null, + "top_p": null, + "frequency_penalty": null + } + }, + { + "id": "google/gemini-2.5-flash-preview-09-2025", + "canonical_slug": "google/gemini-2.5-flash-preview-09-2025", + "hugging_face_id": "", + "name": "Google: Gemini 2.5 Flash Preview 09-2025", + "created": 1758820178, + "description": "Gemini 2.5 Flash Preview September 2025 Checkpoint is Google's state-of-the-art workhorse model, specifically designed for advanced reasoning, coding, mathematics, and scientific tasks. It includes built-in \"thinking\" capabilities, enabling it to provide responses with greater accuracy and nuanced context handling. \n\nAdditionally, Gemini 2.5 Flash is configurable through the \"max tokens for reasoning\" parameter, as described in the documentation (https://openrouter.ai/docs/use-cases/reasoning-tokens#max-tokens-for-reasoning).", + "context_length": 1048576, + "architecture": { + "modality": "text+image-\u003Etext", + "input_modalities": ["image", "file", "text", "audio", "video"], + "output_modalities": ["text"], + "tokenizer": "Gemini", + "instruct_type": null + }, + "pricing": { + "prompt": "0.0000003", + "completion": "0.0000025", + "request": "0", + "image": "0.001238", + "audio": "0.000001", + "web_search": "0", + "internal_reasoning": "0", + "input_cache_read": "0.000000075", + "input_cache_write": "0.0000003833" + }, + "top_provider": { + "context_length": 1048576, + "max_completion_tokens": 65536, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "include_reasoning", + "max_tokens", + "reasoning", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_p" + ], + "default_parameters": { + "temperature": null, + "top_p": null, + "frequency_penalty": null + } + }, + { + "id": "google/gemini-2.5-flash-lite-preview-09-2025", + "canonical_slug": "google/gemini-2.5-flash-lite-preview-09-2025", + "hugging_face_id": "", + "name": "Google: Gemini 2.5 Flash Lite Preview 09-2025", + "created": 1758819686, + "description": "Gemini 2.5 Flash-Lite is a lightweight reasoning model in the Gemini 2.5 family, optimized for ultra-low latency and cost efficiency. It offers improved throughput, faster token generation, and better performance across common benchmarks compared to earlier Flash models. By default, \"thinking\" (i.e. multi-pass reasoning) is disabled to prioritize speed, but developers can enable it via the [Reasoning API parameter](https://openrouter.ai/docs/use-cases/reasoning-tokens) to selectively trade off cost for intelligence. 
", + "context_length": 1048576, + "architecture": { + "modality": "text+image-\u003Etext", + "input_modalities": ["text", "image", "file", "audio", "video"], + "output_modalities": ["text"], + "tokenizer": "Gemini", + "instruct_type": null + }, + "pricing": { + "prompt": "0.0000001", + "completion": "0.0000004", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 1048576, + "max_completion_tokens": 65536, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "include_reasoning", + "max_tokens", + "reasoning", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_p" + ], + "default_parameters": { + "temperature": null, + "top_p": null, + "frequency_penalty": null + } + }, + { + "id": "qwen/qwen3-vl-235b-a22b-thinking", + "canonical_slug": "qwen/qwen3-vl-235b-a22b-thinking", + "hugging_face_id": "Qwen/Qwen3-VL-235B-A22B-Thinking", + "name": "Qwen: Qwen3 VL 235B A22B Thinking", + "created": 1758668690, + "description": "Qwen3-VL-235B-A22B Thinking is a multimodal model that unifies strong text generation with visual understanding across images and video. The Thinking model is optimized for multimodal reasoning in STEM and math. The series emphasizes robust perception (recognition of diverse real-world and synthetic categories), spatial understanding (2D/3D grounding), and long-form visual comprehension, with competitive results on public multimodal benchmarks for both perception and reasoning.\n\nBeyond analysis, Qwen3-VL supports agentic interaction and tool use: it can follow complex instructions over multi-image, multi-turn dialogues; align text to video timelines for precise temporal queries; and operate GUI elements for automation tasks. The models also enable visual coding workflows, turning sketches or mockups into code and assisting with UI debugging, while maintaining strong text-only performance comparable to the flagship Qwen3 language models. This makes Qwen3-VL suitable for production scenarios spanning document AI, multilingual OCR, software/UI assistance, spatial/embodied tasks, and research on vision-language agents.", + "context_length": 262144, + "architecture": { + "modality": "text+image-\u003Etext", + "input_modalities": ["text", "image"], + "output_modalities": ["text"], + "tokenizer": "Qwen3", + "instruct_type": null + }, + "pricing": { + "prompt": "0.0000003", + "completion": "0.0000012", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 262144, + "max_completion_tokens": 262144, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "include_reasoning", + "max_tokens", + "presence_penalty", + "reasoning", + "repetition_penalty", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_k", + "top_p" + ], + "default_parameters": { + "temperature": 0.8, + "top_p": 0.95, + "frequency_penalty": null + } + }, + { + "id": "qwen/qwen3-vl-235b-a22b-instruct", + "canonical_slug": "qwen/qwen3-vl-235b-a22b-instruct", + "hugging_face_id": "Qwen/Qwen3-VL-235B-A22B-Instruct", + "name": "Qwen: Qwen3 VL 235B A22B Instruct", + "created": 1758668687, + "description": "Qwen3-VL-235B-A22B Instruct is an open-weight multimodal model that unifies strong text generation with visual understanding across images and video. 
The Instruct model targets general vision-language use (VQA, document parsing, chart/table extraction, multilingual OCR). The series emphasizes robust perception (recognition of diverse real-world and synthetic categories), spatial understanding (2D/3D grounding), and long-form visual comprehension, with competitive results on public multimodal benchmarks for both perception and reasoning.\n\nBeyond analysis, Qwen3-VL supports agentic interaction and tool use: it can follow complex instructions over multi-image, multi-turn dialogues; align text to video timelines for precise temporal queries; and operate GUI elements for automation tasks. The models also enable visual coding workflows—turning sketches or mockups into code and assisting with UI debugging—while maintaining strong text-only performance comparable to the flagship Qwen3 language models. This makes Qwen3-VL suitable for production scenarios spanning document AI, multilingual OCR, software/UI assistance, spatial/embodied tasks, and research on vision-language agents.", + "context_length": 262144, + "architecture": { + "modality": "text+image-\u003Etext", + "input_modalities": ["text", "image"], + "output_modalities": ["text"], + "tokenizer": "Qwen3", + "instruct_type": null + }, + "pricing": { + "prompt": "0.00000022", + "completion": "0.00000088", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 262144, + "max_completion_tokens": null, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "logit_bias", + "logprobs", + "max_tokens", + "min_p", + "presence_penalty", + "repetition_penalty", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_k", + "top_logprobs", + "top_p" + ], + "default_parameters": { + "temperature": 0.7, + "top_p": 0.8, + "frequency_penalty": null + } + }, + { + "id": "qwen/qwen3-max", + "canonical_slug": "qwen/qwen3-max", + "hugging_face_id": "", + "name": "Qwen: Qwen3 Max", + "created": 1758662808, + "description": "Qwen3-Max is an updated release built on the Qwen3 series, offering major improvements in reasoning, instruction following, multilingual support, and long-tail knowledge coverage compared to the January 2025 version. It delivers higher accuracy in math, coding, logic, and science tasks, follows complex instructions in Chinese and English more reliably, reduces hallucinations, and produces higher-quality responses for open-ended Q&A, writing, and conversation. 
The model supports over 100 languages with stronger translation and commonsense reasoning, and is optimized for retrieval-augmented generation (RAG) and tool calling, though it does not include a dedicated “thinking” mode.", + "context_length": 256000, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Qwen3", + "instruct_type": null + }, + "pricing": { + "prompt": "0.0000012", + "completion": "0.000006", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0", + "input_cache_read": "0.00000024" + }, + "top_provider": { + "context_length": 256000, + "max_completion_tokens": 32768, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "max_tokens", + "presence_penalty", + "response_format", + "seed", + "temperature", + "tool_choice", + "tools", + "top_p" + ], + "default_parameters": { + "temperature": 1, + "top_p": 1, + "frequency_penalty": null + } + }, + { + "id": "qwen/qwen3-coder-plus", + "canonical_slug": "qwen/qwen3-coder-plus", + "hugging_face_id": "", + "name": "Qwen: Qwen3 Coder Plus", + "created": 1758662707, + "description": "Qwen3 Coder Plus is Alibaba's proprietary version of the Open Source Qwen3 Coder 480B A35B. It is a powerful coding agent model specializing in autonomous programming via tool calling and environment interaction, combining coding proficiency with versatile general-purpose abilities.", + "context_length": 128000, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Qwen3", + "instruct_type": null + }, + "pricing": { + "prompt": "0.000001", + "completion": "0.000005", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0", + "input_cache_read": "0.0000001" + }, + "top_provider": { + "context_length": 128000, + "max_completion_tokens": 65536, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "max_tokens", + "presence_penalty", + "response_format", + "seed", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_p" + ], + "default_parameters": { + "temperature": null, + "top_p": null, + "frequency_penalty": null + } + }, + { + "id": "openai/gpt-5-codex", + "canonical_slug": "openai/gpt-5-codex", + "hugging_face_id": "", + "name": "OpenAI: GPT-5 Codex", + "created": 1758643403, + "description": "GPT-5-Codex is a specialized version of GPT-5 optimized for software engineering and coding workflows. It is designed for both interactive development sessions and long, independent execution of complex engineering tasks. The model supports building projects from scratch, feature development, debugging, large-scale refactoring, and code review. Compared to GPT-5, Codex is more steerable, adheres closely to developer instructions, and produces cleaner, higher-quality code outputs. Reasoning effort can be adjusted with the `reasoning.effort` parameter. Read the [docs here](https://openrouter.ai/docs/use-cases/reasoning-tokens#reasoning-effort-level)\n\nCodex integrates into developer environments including the CLI, IDE extensions, GitHub, and cloud tasks. It adapts reasoning effort dynamically—providing fast responses for small tasks while sustaining extended multi-hour runs for large projects. The model is trained to perform structured code reviews, catching critical flaws by reasoning over dependencies and validating behavior against tests. 
It also supports multimodal inputs such as images or screenshots for UI development and integrates tool use for search, dependency installation, and environment setup. Codex is intended specifically for agentic coding applications.", + "context_length": 400000, + "architecture": { + "modality": "text+image-\u003Etext", + "input_modalities": ["text", "image"], + "output_modalities": ["text"], + "tokenizer": "GPT", + "instruct_type": null + }, + "pricing": { + "prompt": "0.00000125", + "completion": "0.00001", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0", + "input_cache_read": "0.000000125" + }, + "top_provider": { + "context_length": 400000, + "max_completion_tokens": 128000, + "is_moderated": true + }, + "per_request_limits": null, + "supported_parameters": [ + "include_reasoning", + "max_tokens", + "reasoning", + "response_format", + "seed", + "structured_outputs", + "tool_choice", + "tools" + ], + "default_parameters": { + "temperature": null, + "top_p": null, + "frequency_penalty": null + } + }, + { + "id": "deepseek/deepseek-v3.1-terminus", + "canonical_slug": "deepseek/deepseek-v3.1-terminus", + "hugging_face_id": "deepseek-ai/DeepSeek-V3.1-Terminus", + "name": "DeepSeek: DeepSeek V3.1 Terminus", + "created": 1758548275, + "description": "DeepSeek-V3.1 Terminus is an update to [DeepSeek V3.1](/deepseek/deepseek-chat-v3.1) that maintains the model's original capabilities while addressing issues reported by users, including language consistency and agent capabilities, further optimizing the model's performance in coding and search agents. It is a large hybrid reasoning model (671B parameters, 37B active) that supports both thinking and non-thinking modes. It extends the DeepSeek-V3 base with a two-phase long-context training process, reaching up to 128K tokens, and uses FP8 microscaling for efficient inference. Users can control the reasoning behaviour with the `reasoning` `enabled` boolean. [Learn more in our docs](https://openrouter.ai/docs/use-cases/reasoning-tokens#enable-reasoning-with-default-config)\n\nThe model improves tool use, code generation, and reasoning efficiency, achieving performance comparable to DeepSeek-R1 on difficult benchmarks while responding more quickly. It supports structured tool calling, code agents, and search agents, making it suitable for research, coding, and agentic workflows. 
", + "context_length": 163840, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "DeepSeek", + "instruct_type": "deepseek-v3.1" + }, + "pricing": { + "prompt": "0.00000023", + "completion": "0.0000009", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 163840, + "max_completion_tokens": 163840, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "include_reasoning", + "max_tokens", + "min_p", + "presence_penalty", + "reasoning", + "repetition_penalty", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_k", + "top_p" + ], + "default_parameters": { + "temperature": null, + "top_p": null, + "frequency_penalty": null + } + }, + { + "id": "deepseek/deepseek-v3.1-terminus:exacto", + "canonical_slug": "deepseek/deepseek-v3.1-terminus", + "hugging_face_id": "deepseek-ai/DeepSeek-V3.1-Terminus", + "name": "DeepSeek: DeepSeek V3.1 Terminus (exacto)", + "created": 1758548275, + "description": "DeepSeek-V3.1 Terminus is an update to [DeepSeek V3.1](/deepseek/deepseek-chat-v3.1) that maintains the model's original capabilities while addressing issues reported by users, including language consistency and agent capabilities, further optimizing the model's performance in coding and search agents. It is a large hybrid reasoning model (671B parameters, 37B active) that supports both thinking and non-thinking modes. It extends the DeepSeek-V3 base with a two-phase long-context training process, reaching up to 128K tokens, and uses FP8 microscaling for efficient inference. Users can control the reasoning behaviour with the `reasoning` `enabled` boolean. [Learn more in our docs](https://openrouter.ai/docs/use-cases/reasoning-tokens#enable-reasoning-with-default-config)\n\nThe model improves tool use, code generation, and reasoning efficiency, achieving performance comparable to DeepSeek-R1 on difficult benchmarks while responding more quickly. It supports structured tool calling, code agents, and search agents, making it suitable for research, coding, and agentic workflows. ", + "context_length": 131072, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "DeepSeek", + "instruct_type": "deepseek-v3.1" + }, + "pricing": { + "prompt": "0.00000027", + "completion": "0.000001", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 131072, + "max_completion_tokens": 65536, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "include_reasoning", + "max_tokens", + "min_p", + "presence_penalty", + "reasoning", + "repetition_penalty", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_k", + "top_p" + ], + "default_parameters": { + "temperature": null, + "top_p": null, + "frequency_penalty": null + } + }, + { + "id": "x-ai/grok-4-fast", + "canonical_slug": "x-ai/grok-4-fast", + "hugging_face_id": "", + "name": "xAI: Grok 4 Fast", + "created": 1758240090, + "description": "Grok 4 Fast is xAI's latest multimodal model with SOTA cost-efficiency and a 2M token context window. It comes in two flavors: non-reasoning and reasoning. 
Read more about the model on xAI's [news post](http://x.ai/news/grok-4-fast). Reasoning can be enabled using the `reasoning` `enabled` parameter in the API. [Learn more in our docs](https://openrouter.ai/docs/use-cases/reasoning-tokens#controlling-reasoning-tokens)", + "context_length": 2000000, + "architecture": { + "modality": "text+image-\u003Etext", + "input_modalities": ["text", "image"], + "output_modalities": ["text"], + "tokenizer": "Grok", + "instruct_type": null + }, + "pricing": { + "prompt": "0.0000002", + "completion": "0.0000005", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0", + "input_cache_read": "0.00000005" + }, + "top_provider": { + "context_length": 2000000, + "max_completion_tokens": 30000, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "include_reasoning", + "logprobs", + "max_tokens", + "reasoning", + "response_format", + "seed", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_logprobs", + "top_p" + ], + "default_parameters": { + "temperature": null, + "top_p": null, + "frequency_penalty": null + } + }, + { + "id": "alibaba/tongyi-deepresearch-30b-a3b:free", + "canonical_slug": "alibaba/tongyi-deepresearch-30b-a3b", + "hugging_face_id": "Alibaba-NLP/Tongyi-DeepResearch-30B-A3B", + "name": "Tongyi DeepResearch 30B A3B (free)", + "created": 1758210804, + "description": "Tongyi DeepResearch is an agentic large language model developed by Tongyi Lab, with 30 billion total parameters activating only 3 billion per token. It's optimized for long-horizon, deep information-seeking tasks and delivers state-of-the-art performance on benchmarks like Humanity's Last Exam, BrowserComp, BrowserComp-ZH, WebWalkerQA, GAIA, xbench-DeepSearch, and FRAMES. This makes it superior for complex agentic search, reasoning, and multi-step problem-solving compared to prior models.\n\nThe model includes a fully automated synthetic data pipeline for scalable pre-training, fine-tuning, and reinforcement learning. It uses large-scale continual pre-training on diverse agentic data to boost reasoning and stay fresh. It also features end-to-end on-policy RL with a customized Group Relative Policy Optimization, including token-level gradients and negative sample filtering for stable training. The model supports ReAct for core ability checks and an IterResearch-based 'Heavy' mode for max performance through test-time scaling. 
It's ideal for advanced research agents, tool use, and heavy inference workflows.", + "context_length": 131072, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Other", + "instruct_type": null + }, + "pricing": { + "prompt": "0", + "completion": "0", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 131072, + "max_completion_tokens": 131072, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "include_reasoning", + "max_tokens", + "presence_penalty", + "reasoning", + "repetition_penalty", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_k", + "top_p" + ], + "default_parameters": { + "temperature": null, + "top_p": null, + "frequency_penalty": null + } + }, + { + "id": "alibaba/tongyi-deepresearch-30b-a3b", + "canonical_slug": "alibaba/tongyi-deepresearch-30b-a3b", + "hugging_face_id": "Alibaba-NLP/Tongyi-DeepResearch-30B-A3B", + "name": "Tongyi DeepResearch 30B A3B", + "created": 1758210804, + "description": "Tongyi DeepResearch is an agentic large language model developed by Tongyi Lab, with 30 billion total parameters activating only 3 billion per token. It's optimized for long-horizon, deep information-seeking tasks and delivers state-of-the-art performance on benchmarks like Humanity's Last Exam, BrowserComp, BrowserComp-ZH, WebWalkerQA, GAIA, xbench-DeepSearch, and FRAMES. This makes it superior for complex agentic search, reasoning, and multi-step problem-solving compared to prior models.\n\nThe model includes a fully automated synthetic data pipeline for scalable pre-training, fine-tuning, and reinforcement learning. It uses large-scale continual pre-training on diverse agentic data to boost reasoning and stay fresh. It also features end-to-end on-policy RL with a customized Group Relative Policy Optimization, including token-level gradients and negative sample filtering for stable training. The model supports ReAct for core ability checks and an IterResearch-based 'Heavy' mode for max performance through test-time scaling. It's ideal for advanced research agents, tool use, and heavy inference workflows.", + "context_length": 131072, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Other", + "instruct_type": null + }, + "pricing": { + "prompt": "0.00000009", + "completion": "0.0000004", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 131072, + "max_completion_tokens": 131072, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "include_reasoning", + "max_tokens", + "min_p", + "presence_penalty", + "reasoning", + "repetition_penalty", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_k", + "top_p" + ], + "default_parameters": { + "temperature": null, + "top_p": null, + "frequency_penalty": null + } + }, + { + "id": "qwen/qwen3-coder-flash", + "canonical_slug": "qwen/qwen3-coder-flash", + "hugging_face_id": "", + "name": "Qwen: Qwen3 Coder Flash", + "created": 1758115536, + "description": "Qwen3 Coder Flash is Alibaba's fast and cost efficient version of their proprietary Qwen3 Coder Plus. 
It is a powerful coding agent model specializing in autonomous programming via tool calling and environment interaction, combining coding proficiency with versatile general-purpose abilities.", + "context_length": 128000, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Qwen3", + "instruct_type": null + }, + "pricing": { + "prompt": "0.0000003", + "completion": "0.0000015", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0", + "input_cache_read": "0.00000008" + }, + "top_provider": { + "context_length": 128000, + "max_completion_tokens": 65536, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "max_tokens", + "presence_penalty", + "response_format", + "seed", + "temperature", + "tool_choice", + "tools", + "top_p" + ], + "default_parameters": { + "temperature": null, + "top_p": null, + "frequency_penalty": null + } + }, + { + "id": "arcee-ai/afm-4.5b", + "canonical_slug": "arcee-ai/afm-4.5b", + "hugging_face_id": "arcee-ai/AFM-4.5B", + "name": "Arcee AI: AFM 4.5B", + "created": 1758040484, + "description": "AFM-4.5B is a 4.5 billion parameter instruction-tuned language model developed by Arcee AI. The model was pretrained on approximately 8 trillion tokens, including 6.5 trillion tokens of general data and 1.5 trillion tokens with an emphasis on mathematical reasoning and code generation. ", + "context_length": 65536, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Other", + "instruct_type": null + }, + "pricing": { + "prompt": "0.000000048", + "completion": "0.00000015", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 65536, + "max_completion_tokens": null, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "logit_bias", + "max_tokens", + "min_p", + "presence_penalty", + "repetition_penalty", + "response_format", + "stop", + "structured_outputs", + "temperature", + "top_k", + "top_p" + ], + "default_parameters": { + "temperature": null, + "top_p": null, + "frequency_penalty": null + } + }, + { + "id": "opengvlab/internvl3-78b", + "canonical_slug": "opengvlab/internvl3-78b", + "hugging_face_id": "OpenGVLab/InternVL3-78B", + "name": "OpenGVLab: InternVL3 78B", + "created": 1757962555, + "description": "The InternVL3 series is an advanced multimodal large language model (MLLM). Compared to InternVL 2.5, InternVL3 demonstrates stronger multimodal perception and reasoning capabilities. \n\nIn addition, InternVL3 is benchmarked against the Qwen2.5 Chat models, whose pre-trained base models serve as the initialization for its language component. 
Benefiting from Native Multimodal Pre-Training, the InternVL3 series surpasses the Qwen2.5 series in overall text performance.", + "context_length": 32768, + "architecture": { + "modality": "text+image-\u003Etext", + "input_modalities": ["image", "text"], + "output_modalities": ["text"], + "tokenizer": "Other", + "instruct_type": null + }, + "pricing": { + "prompt": "0.00000007", + "completion": "0.00000026", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 32768, + "max_completion_tokens": 32768, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "max_tokens", + "presence_penalty", + "repetition_penalty", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "top_k", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "qwen/qwen3-next-80b-a3b-thinking", + "canonical_slug": "qwen/qwen3-next-80b-a3b-thinking-2509", + "hugging_face_id": "Qwen/Qwen3-Next-80B-A3B-Thinking", + "name": "Qwen: Qwen3 Next 80B A3B Thinking", + "created": 1757612284, + "description": "Qwen3-Next-80B-A3B-Thinking is a reasoning-first chat model in the Qwen3-Next line that outputs structured “thinking” traces by default. It’s designed for hard multi-step problems; math proofs, code synthesis/debugging, logic, and agentic planning, and reports strong results across knowledge, reasoning, coding, alignment, and multilingual evaluations. Compared with prior Qwen3 variants, it emphasizes stability under long chains of thought and efficient scaling during inference, and it is tuned to follow complex instructions while reducing repetitive or off-task behavior.\n\nThe model is suitable for agent frameworks and tool use (function calling), retrieval-heavy workflows, and standardized benchmarking where step-by-step solutions are required. It supports long, detailed completions and leverages throughput-oriented techniques (e.g., multi-token prediction) for faster generation. Note that it operates in thinking-only mode.", + "context_length": 262144, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Qwen3", + "instruct_type": null + }, + "pricing": { + "prompt": "0.00000015", + "completion": "0.0000012", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 262144, + "max_completion_tokens": 262144, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "include_reasoning", + "logit_bias", + "max_tokens", + "min_p", + "presence_penalty", + "reasoning", + "repetition_penalty", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_k", + "top_p" + ], + "default_parameters": { + "temperature": null, + "top_p": null, + "frequency_penalty": null + } + }, + { + "id": "qwen/qwen3-next-80b-a3b-instruct", + "canonical_slug": "qwen/qwen3-next-80b-a3b-instruct-2509", + "hugging_face_id": "Qwen/Qwen3-Next-80B-A3B-Instruct", + "name": "Qwen: Qwen3 Next 80B A3B Instruct", + "created": 1757612213, + "description": "Qwen3-Next-80B-A3B-Instruct is an instruction-tuned chat model in the Qwen3-Next series optimized for fast, stable responses without “thinking” traces. 
It targets complex tasks across reasoning, code generation, knowledge QA, and multilingual use, while remaining robust on alignment and formatting. Compared with prior Qwen3 instruct variants, it focuses on higher throughput and stability on ultra-long inputs and multi-turn dialogues, making it well-suited for RAG, tool use, and agentic workflows that require consistent final answers rather than visible chain-of-thought.\n\nThe model employs scaling-efficient training and decoding to improve parameter efficiency and inference speed, and has been validated on a broad set of public benchmarks where it reaches or approaches larger Qwen3 systems in several categories while outperforming earlier mid-sized baselines. It is best used as a general assistant, code helper, and long-context task solver in production settings where deterministic, instruction-following outputs are preferred.", + "context_length": 262144, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Qwen3", + "instruct_type": null + }, + "pricing": { + "prompt": "0.0000001", + "completion": "0.0000008", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 262144, + "max_completion_tokens": 262144, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "logit_bias", + "max_tokens", + "min_p", + "presence_penalty", + "repetition_penalty", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_k", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "meituan/longcat-flash-chat:free", + "canonical_slug": "meituan/longcat-flash-chat", + "hugging_face_id": "meituan-longcat/LongCat-Flash-Chat", + "name": "Meituan: LongCat Flash Chat (free)", + "created": 1757427658, + "description": "LongCat-Flash-Chat is a large-scale Mixture-of-Experts (MoE) model with 560B total parameters, of which 18.6B–31.3B (≈27B on average) are dynamically activated per input. It introduces a shortcut-connected MoE design to reduce communication overhead and achieve high throughput while maintaining training stability through advanced scaling strategies such as hyperparameter transfer, deterministic computation, and multi-stage optimization.\n\nThis release, LongCat-Flash-Chat, is a non-thinking foundation model optimized for conversational and agentic tasks. 
It supports long context windows up to 128K tokens and shows competitive performance across reasoning, coding, instruction following, and domain benchmarks, with particular strengths in tool use and complex multi-step interactions.", + "context_length": 131072, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Other", + "instruct_type": null + }, + "pricing": { + "prompt": "0", + "completion": "0", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 131072, + "max_completion_tokens": 131072, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "max_tokens", + "presence_penalty", + "repetition_penalty", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_k", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "meituan/longcat-flash-chat", + "canonical_slug": "meituan/longcat-flash-chat", + "hugging_face_id": "meituan-longcat/LongCat-Flash-Chat", + "name": "Meituan: LongCat Flash Chat", + "created": 1757427658, + "description": "LongCat-Flash-Chat is a large-scale Mixture-of-Experts (MoE) model with 560B total parameters, of which 18.6B–31.3B (≈27B on average) are dynamically activated per input. It introduces a shortcut-connected MoE design to reduce communication overhead and achieve high throughput while maintaining training stability through advanced scaling strategies such as hyperparameter transfer, deterministic computation, and multi-stage optimization.\n\nThis release, LongCat-Flash-Chat, is a non-thinking foundation model optimized for conversational and agentic tasks. 
It supports long context windows up to 128K tokens and shows competitive performance across reasoning, coding, instruction following, and domain benchmarks, with particular strengths in tool use and complex multi-step interactions.", + "context_length": 131072, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Other", + "instruct_type": null + }, + "pricing": { + "prompt": "0.00000015", + "completion": "0.00000075", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 131072, + "max_completion_tokens": 131072, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": ["max_tokens", "temperature", "top_p"], + "default_parameters": {} + }, + { + "id": "qwen/qwen-plus-2025-07-28", + "canonical_slug": "qwen/qwen-plus-2025-07-28", + "hugging_face_id": "", + "name": "Qwen: Qwen Plus 0728", + "created": 1757347599, + "description": "Qwen Plus 0728, based on the Qwen3 foundation model, is a 1 million context hybrid reasoning model with a balanced performance, speed, and cost combination.", + "context_length": 1000000, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Qwen3", + "instruct_type": null + }, + "pricing": { + "prompt": "0.0000004", + "completion": "0.0000012", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 1000000, + "max_completion_tokens": 32768, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "max_tokens", + "presence_penalty", + "response_format", + "seed", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_p" + ], + "default_parameters": { + "temperature": null, + "top_p": null, + "frequency_penalty": null + } + }, + { + "id": "qwen/qwen-plus-2025-07-28:thinking", + "canonical_slug": "qwen/qwen-plus-2025-07-28", + "hugging_face_id": "", + "name": "Qwen: Qwen Plus 0728 (thinking)", + "created": 1757347599, + "description": "Qwen Plus 0728, based on the Qwen3 foundation model, is a 1 million context hybrid reasoning model with a balanced performance, speed, and cost combination.", + "context_length": 1000000, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Qwen3", + "instruct_type": null + }, + "pricing": { + "prompt": "0.0000004", + "completion": "0.000004", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 1000000, + "max_completion_tokens": 32768, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "include_reasoning", + "max_tokens", + "presence_penalty", + "reasoning", + "response_format", + "seed", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_p" + ], + "default_parameters": { + "temperature": null, + "top_p": null, + "frequency_penalty": null + } + }, + { + "id": "nvidia/nemotron-nano-9b-v2:free", + "canonical_slug": "nvidia/nemotron-nano-9b-v2", + "hugging_face_id": "nvidia/NVIDIA-Nemotron-Nano-9B-v2", + "name": "NVIDIA: Nemotron Nano 9B V2 (free)", + "created": 1757106807, + "description": "NVIDIA-Nemotron-Nano-9B-v2 is a large language model (LLM) trained from scratch by NVIDIA, and designed as a unified model for both reasoning and 
non-reasoning tasks. It responds to user queries and tasks by first generating a reasoning trace and then concluding with a final response. \n\nThe model's reasoning capabilities can be controlled via a system prompt. If the user prefers the model to provide its final answer without intermediate reasoning traces, it can be configured to do so.", + "context_length": 128000, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Other", + "instruct_type": null + }, + "pricing": { + "prompt": "0", + "completion": "0", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 128000, + "max_completion_tokens": null, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "include_reasoning", + "reasoning", + "response_format", + "structured_outputs", + "tool_choice", + "tools" + ], + "default_parameters": {} + }, + { + "id": "nvidia/nemotron-nano-9b-v2", + "canonical_slug": "nvidia/nemotron-nano-9b-v2", + "hugging_face_id": "nvidia/NVIDIA-Nemotron-Nano-9B-v2", + "name": "NVIDIA: Nemotron Nano 9B V2", + "created": 1757106807, + "description": "NVIDIA-Nemotron-Nano-9B-v2 is a large language model (LLM) trained from scratch by NVIDIA, and designed as a unified model for both reasoning and non-reasoning tasks. It responds to user queries and tasks by first generating a reasoning trace and then concluding with a final response. \n\nThe model's reasoning capabilities can be controlled via a system prompt. If the user prefers the model to provide its final answer without intermediate reasoning traces, it can be configured to do so.", + "context_length": 131072, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Other", + "instruct_type": null + }, + "pricing": { + "prompt": "0.00000004", + "completion": "0.00000016", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 131072, + "max_completion_tokens": null, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "include_reasoning", + "logit_bias", + "max_tokens", + "min_p", + "presence_penalty", + "reasoning", + "repetition_penalty", + "response_format", + "seed", + "stop", + "temperature", + "tool_choice", + "tools", + "top_k", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "moonshotai/kimi-k2-0905", + "canonical_slug": "moonshotai/kimi-k2-0905", + "hugging_face_id": "moonshotai/Kimi-K2-Instruct-0905", + "name": "MoonshotAI: Kimi K2 0905", + "created": 1757021147, + "description": "Kimi K2 0905 is the September update of [Kimi K2 0711](moonshotai/kimi-k2). It is a large-scale Mixture-of-Experts (MoE) language model developed by Moonshot AI, featuring 1 trillion total parameters with 32 billion active per forward pass. It supports long-context inference up to 256k tokens, extended from the previous 128k.\n\nThis update improves agentic coding with higher accuracy and better generalization across scaffolds, and enhances frontend coding with more aesthetic and functional outputs for web, 3D, and related tasks. Kimi K2 is optimized for agentic capabilities, including advanced tool use, reasoning, and code synthesis. It excels across coding (LiveCodeBench, SWE-bench), reasoning (ZebraLogic, GPQA), and tool-use (Tau2, AceBench) benchmarks. 
The model is trained with a novel stack incorporating the MuonClip optimizer for stable large-scale MoE training.", + "context_length": 262144, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Other", + "instruct_type": null + }, + "pricing": { + "prompt": "0.00000039", + "completion": "0.0000019", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 262144, + "max_completion_tokens": 262144, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "logit_bias", + "logprobs", + "max_tokens", + "min_p", + "presence_penalty", + "repetition_penalty", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_k", + "top_logprobs", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "moonshotai/kimi-k2-0905:exacto", + "canonical_slug": "moonshotai/kimi-k2-0905", + "hugging_face_id": "moonshotai/Kimi-K2-Instruct-0905", + "name": "MoonshotAI: Kimi K2 0905 (exacto)", + "created": 1757021147, + "description": "Kimi K2 0905 is the September update of [Kimi K2 0711](moonshotai/kimi-k2). It is a large-scale Mixture-of-Experts (MoE) language model developed by Moonshot AI, featuring 1 trillion total parameters with 32 billion active per forward pass. It supports long-context inference up to 256k tokens, extended from the previous 128k.\n\nThis update improves agentic coding with higher accuracy and better generalization across scaffolds, and enhances frontend coding with more aesthetic and functional outputs for web, 3D, and related tasks. Kimi K2 is optimized for agentic capabilities, including advanced tool use, reasoning, and code synthesis. It excels across coding (LiveCodeBench, SWE-bench), reasoning (ZebraLogic, GPQA), and tool-use (Tau2, AceBench) benchmarks. The model is trained with a novel stack incorporating the MuonClip optimizer for stable large-scale MoE training.", + "context_length": 262144, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Other", + "instruct_type": null + }, + "pricing": { + "prompt": "0.0000006", + "completion": "0.0000025", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 262144, + "max_completion_tokens": null, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "max_tokens", + "presence_penalty", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "deepcogito/cogito-v2-preview-llama-70b", + "canonical_slug": "deepcogito/cogito-v2-preview-llama-70b", + "hugging_face_id": "deepcogito/cogito-v2-preview-llama-70B", + "name": "Deep Cogito: Cogito V2 Preview Llama 70B", + "created": 1756831784, + "description": "Cogito v2 70B is a dense hybrid reasoning model that combines direct answering capabilities with advanced self-reflection. 
Built with iterative policy improvement, it delivers strong performance across reasoning tasks while maintaining efficiency through shorter reasoning chains and improved intuition.", + "context_length": 32768, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Llama3", + "instruct_type": null + }, + "pricing": { + "prompt": "0.00000088", + "completion": "0.00000088", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 32768, + "max_completion_tokens": null, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "include_reasoning", + "logit_bias", + "max_tokens", + "min_p", + "presence_penalty", + "reasoning", + "repetition_penalty", + "response_format", + "stop", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_k", + "top_p" + ], + "default_parameters": { + "temperature": null, + "top_p": null, + "frequency_penalty": null + } + }, + { + "id": "deepcogito/cogito-v2-preview-llama-109b-moe", + "canonical_slug": "deepcogito/cogito-v2-preview-llama-109b-moe", + "hugging_face_id": "deepcogito/cogito-v2-preview-llama-109B-MoE", + "name": "Cogito V2 Preview Llama 109B", + "created": 1756831568, + "description": "An instruction-tuned, hybrid-reasoning Mixture-of-Experts model built on Llama-4-Scout-17B-16E. Cogito v2 can answer directly or engage an extended “thinking” phase, with alignment guided by Iterated Distillation & Amplification (IDA). It targets coding, STEM, instruction following, and general helpfulness, with stronger multilingual, tool-calling, and reasoning performance than size-equivalent baselines. The model supports long-context use (up to 10M tokens) and standard Transformers workflows. Users can control the reasoning behaviour with the `reasoning` `enabled` boolean. [Learn more in our docs](https://openrouter.ai/docs/use-cases/reasoning-tokens#enable-reasoning-with-default-config)", + "context_length": 32767, + "architecture": { + "modality": "text+image-\u003Etext", + "input_modalities": ["image", "text"], + "output_modalities": ["text"], + "tokenizer": "Llama4", + "instruct_type": null + }, + "pricing": { + "prompt": "0.00000018", + "completion": "0.00000059", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 32767, + "max_completion_tokens": null, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "include_reasoning", + "logit_bias", + "max_tokens", + "min_p", + "presence_penalty", + "reasoning", + "repetition_penalty", + "stop", + "temperature", + "tool_choice", + "tools", + "top_k", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "deepcogito/cogito-v2-preview-deepseek-671b", + "canonical_slug": "deepcogito/cogito-v2-preview-deepseek-671b", + "hugging_face_id": "deepcogito/cogito-v2-preview-deepseek-671B-MoE", + "name": "Deep Cogito: Cogito V2 Preview Deepseek 671B", + "created": 1756830949, + "description": "Cogito v2 is a multilingual, instruction-tuned Mixture of Experts (MoE) large language model with 671 billion parameters. It supports both standard and reasoning-based generation modes. The model introduces hybrid reasoning via Iterated Distillation and Amplification (IDA)—an iterative self-improvement strategy designed to scale alignment with general intelligence. 
Cogito v2 has been optimized for STEM, programming, instruction following, and tool use. It supports 128k context length and offers strong performance in both multilingual and code-heavy environments. Users can control the reasoning behaviour with the `reasoning` `enabled` boolean. [Learn more in our docs](https://openrouter.ai/docs/use-cases/reasoning-tokens#enable-reasoning-with-default-config)", + "context_length": 163840, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "DeepSeek", + "instruct_type": null + }, + "pricing": { + "prompt": "0.00000125", + "completion": "0.00000125", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 163840, + "max_completion_tokens": null, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "include_reasoning", + "logit_bias", + "max_tokens", + "min_p", + "presence_penalty", + "reasoning", + "repetition_penalty", + "stop", + "temperature", + "top_k", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "stepfun-ai/step3", + "canonical_slug": "stepfun-ai/step3", + "hugging_face_id": "stepfun-ai/step3", + "name": "StepFun: Step3", + "created": 1756415375, + "description": "Step3 is a cutting-edge multimodal reasoning model—built on a Mixture-of-Experts architecture with 321B total parameters and 38B active. It is designed end-to-end to minimize decoding costs while delivering top-tier performance in vision–language reasoning. Through the co-design of Multi-Matrix Factorization Attention (MFA) and Attention-FFN Disaggregation (AFD), Step3 maintains exceptional efficiency across both flagship and low-end accelerators.", + "context_length": 65536, + "architecture": { + "modality": "text+image-\u003Etext", + "input_modalities": ["image", "text"], + "output_modalities": ["text"], + "tokenizer": "Other", + "instruct_type": null + }, + "pricing": { + "prompt": "0.00000057", + "completion": "0.00000142", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 65536, + "max_completion_tokens": 65536, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "include_reasoning", + "reasoning", + "response_format", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_k", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "qwen/qwen3-30b-a3b-thinking-2507", + "canonical_slug": "qwen/qwen3-30b-a3b-thinking-2507", + "hugging_face_id": "Qwen/Qwen3-30B-A3B-Thinking-2507", + "name": "Qwen: Qwen3 30B A3B Thinking 2507", + "created": 1756399192, + "description": "Qwen3-30B-A3B-Thinking-2507 is a 30B parameter Mixture-of-Experts reasoning model optimized for complex tasks requiring extended multi-step thinking. The model is designed specifically for “thinking mode,” where internal reasoning traces are separated from final answers.\n\nCompared to earlier Qwen3-30B releases, this version improves performance across logical reasoning, mathematics, science, coding, and multilingual benchmarks. It also demonstrates stronger instruction following, tool use, and alignment with human preferences. 
With higher reasoning efficiency and extended output budgets, it is best suited for advanced research, competitive problem solving, and agentic applications requiring structured long-context reasoning.", + "context_length": 262144, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Qwen3", + "instruct_type": null + }, + "pricing": { + "prompt": "0.00000009", + "completion": "0.0000003", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 262144, + "max_completion_tokens": 131072, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "include_reasoning", + "max_tokens", + "presence_penalty", + "reasoning", + "repetition_penalty", + "response_format", + "seed", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_k", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "x-ai/grok-code-fast-1", + "canonical_slug": "x-ai/grok-code-fast-1", + "hugging_face_id": "", + "name": "xAI: Grok Code Fast 1", + "created": 1756238927, + "description": "Grok Code Fast 1 is a speedy and economical reasoning model that excels at agentic coding. With reasoning traces visible in the response, developers can steer Grok Code for high-quality work flows.", + "context_length": 256000, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Grok", + "instruct_type": null + }, + "pricing": { + "prompt": "0.0000002", + "completion": "0.0000015", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0", + "input_cache_read": "0.00000002" + }, + "top_provider": { + "context_length": 256000, + "max_completion_tokens": 10000, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "include_reasoning", + "logprobs", + "max_tokens", + "reasoning", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_logprobs", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "nousresearch/hermes-4-70b", + "canonical_slug": "nousresearch/hermes-4-70b", + "hugging_face_id": "NousResearch/Hermes-4-70B", + "name": "Nous: Hermes 4 70B", + "created": 1756236182, + "description": "Hermes 4 70B is a hybrid reasoning model from Nous Research, built on Meta-Llama-3.1-70B. It introduces the same hybrid mode as the larger 405B release, allowing the model to either respond directly or generate explicit \u003Cthink\u003E...\u003C/think\u003E reasoning traces before answering. Users can control the reasoning behaviour with the `reasoning` `enabled` boolean. [Learn more in our docs](https://openrouter.ai/docs/use-cases/reasoning-tokens#enable-reasoning-with-default-config)\n\nThis 70B variant is trained with the expanded post-training corpus (~60B tokens) emphasizing verified reasoning data, leading to improvements in mathematics, coding, STEM, logic, and structured outputs while maintaining general assistant performance. 
It supports JSON mode, schema adherence, function calling, and tool use, and is designed for greater steerability with reduced refusal rates.", + "context_length": 131072, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Llama3", + "instruct_type": null + }, + "pricing": { + "prompt": "0.00000011", + "completion": "0.00000038", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 131072, + "max_completion_tokens": 131072, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "include_reasoning", + "max_tokens", + "presence_penalty", + "reasoning", + "repetition_penalty", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_k", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "nousresearch/hermes-4-405b", + "canonical_slug": "nousresearch/hermes-4-405b", + "hugging_face_id": "NousResearch/Hermes-4-405B", + "name": "Nous: Hermes 4 405B", + "created": 1756235463, + "description": "Hermes 4 is a large-scale reasoning model built on Meta-Llama-3.1-405B and released by Nous Research. It introduces a hybrid reasoning mode, where the model can choose to deliberate internally with \u003Cthink\u003E...\u003C/think\u003E traces or respond directly, offering flexibility between speed and depth. Users can control the reasoning behaviour with the `reasoning` `enabled` boolean. [Learn more in our docs](https://openrouter.ai/docs/use-cases/reasoning-tokens#enable-reasoning-with-default-config)\n\nThe model is instruction-tuned with an expanded post-training corpus (~60B tokens) emphasizing reasoning traces, improving performance in math, code, STEM, and logical reasoning, while retaining broad assistant utility. It also supports structured outputs, including JSON mode, schema adherence, function calling, and tool use. Hermes 4 is trained for steerability, lower refusal rates, and alignment toward neutral, user-directed behavior.", + "context_length": 131072, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Other", + "instruct_type": null + }, + "pricing": { + "prompt": "0.0000003", + "completion": "0.0000012", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 131072, + "max_completion_tokens": 131072, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "include_reasoning", + "max_tokens", + "presence_penalty", + "reasoning", + "repetition_penalty", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_k", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "google/gemini-2.5-flash-image-preview", + "canonical_slug": "google/gemini-2.5-flash-image-preview", + "hugging_face_id": "", + "name": "Google: Gemini 2.5 Flash Image Preview (Nano Banana)", + "created": 1756218977, + "description": "Gemini 2.5 Flash Image Preview, a.k.a. \"Nano Banana,\" is a state of the art image generation model with contextual understanding. 
It is capable of image generation, edits, and multi-turn conversations.", + "context_length": 32768, + "architecture": { + "modality": "text+image-\u003Etext+image", + "input_modalities": ["image", "text"], + "output_modalities": ["image", "text"], + "tokenizer": "Gemini", + "instruct_type": null + }, + "pricing": { + "prompt": "0.0000003", + "completion": "0.0000025", + "request": "0", + "image": "0.001238", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 32768, + "max_completion_tokens": 32768, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "max_tokens", + "response_format", + "seed", + "structured_outputs", + "temperature", + "top_p" + ], + "default_parameters": { + "temperature": null, + "top_p": null, + "frequency_penalty": null + } + }, + { + "id": "deepseek/deepseek-chat-v3.1:free", + "canonical_slug": "deepseek/deepseek-chat-v3.1", + "hugging_face_id": "deepseek-ai/DeepSeek-V3.1", + "name": "DeepSeek: DeepSeek V3.1 (free)", + "created": 1755779628, + "description": "DeepSeek-V3.1 is a large hybrid reasoning model (671B parameters, 37B active) that supports both thinking and non-thinking modes via prompt templates. It extends the DeepSeek-V3 base with a two-phase long-context training process, reaching up to 128K tokens, and uses FP8 microscaling for efficient inference. Users can control the reasoning behaviour with the `reasoning` `enabled` boolean. [Learn more in our docs](https://openrouter.ai/docs/use-cases/reasoning-tokens#enable-reasoning-with-default-config)\n\nThe model improves tool use, code generation, and reasoning efficiency, achieving performance comparable to DeepSeek-R1 on difficult benchmarks while responding more quickly. It supports structured tool calling, code agents, and search agents, making it suitable for research, coding, and agentic workflows. \n\nIt succeeds the [DeepSeek V3-0324](/deepseek/deepseek-chat-v3-0324) model and performs well on a variety of tasks.", + "context_length": 163800, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "DeepSeek", + "instruct_type": "deepseek-v3.1" + }, + "pricing": { + "prompt": "0", + "completion": "0", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 163800, + "max_completion_tokens": null, + "is_moderated": true + }, + "per_request_limits": null, + "supported_parameters": [ + "include_reasoning", + "max_tokens", + "reasoning", + "seed", + "stop", + "temperature" + ], + "default_parameters": {} + }, + { + "id": "deepseek/deepseek-chat-v3.1", + "canonical_slug": "deepseek/deepseek-chat-v3.1", + "hugging_face_id": "deepseek-ai/DeepSeek-V3.1", + "name": "DeepSeek: DeepSeek V3.1", + "created": 1755779628, + "description": "DeepSeek-V3.1 is a large hybrid reasoning model (671B parameters, 37B active) that supports both thinking and non-thinking modes via prompt templates. It extends the DeepSeek-V3 base with a two-phase long-context training process, reaching up to 128K tokens, and uses FP8 microscaling for efficient inference. Users can control the reasoning behaviour with the `reasoning` `enabled` boolean. 
[Learn more in our docs](https://openrouter.ai/docs/use-cases/reasoning-tokens#enable-reasoning-with-default-config)\n\nThe model improves tool use, code generation, and reasoning efficiency, achieving performance comparable to DeepSeek-R1 on difficult benchmarks while responding more quickly. It supports structured tool calling, code agents, and search agents, making it suitable for research, coding, and agentic workflows. \n\nIt succeeds the [DeepSeek V3-0324](/deepseek/deepseek-chat-v3-0324) model and performs well on a variety of tasks.", + "context_length": 163840, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "DeepSeek", + "instruct_type": "deepseek-v3.1" + }, + "pricing": { + "prompt": "0.0000002", + "completion": "0.0000008", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 163840, + "max_completion_tokens": 163840, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "include_reasoning", + "logit_bias", + "logprobs", + "max_tokens", + "min_p", + "presence_penalty", + "reasoning", + "repetition_penalty", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_k", + "top_logprobs", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "openai/gpt-4o-audio-preview", + "canonical_slug": "openai/gpt-4o-audio-preview", + "hugging_face_id": "", + "name": "OpenAI: GPT-4o Audio", + "created": 1755233061, + "description": "The gpt-4o-audio-preview model adds support for audio inputs as prompts. This enhancement allows the model to detect nuances within audio recordings and add depth to generated user experiences. Audio outputs are currently not supported. Audio tokens are priced at $40 per million input audio tokens.", + "context_length": 128000, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["audio", "text"], + "output_modalities": ["text"], + "tokenizer": "GPT", + "instruct_type": null + }, + "pricing": { + "prompt": "0.0000025", + "completion": "0.00001", + "request": "0", + "image": "0", + "audio": "0.00004", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 128000, + "max_completion_tokens": 16384, + "is_moderated": true + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "logit_bias", + "logprobs", + "max_tokens", + "presence_penalty", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_logprobs", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "mistralai/mistral-medium-3.1", + "canonical_slug": "mistralai/mistral-medium-3.1", + "hugging_face_id": "", + "name": "Mistral: Mistral Medium 3.1", + "created": 1755095639, + "description": "Mistral Medium 3.1 is an updated version of Mistral Medium 3, which is a high-performance enterprise-grade language model designed to deliver frontier-level capabilities at significantly reduced operational cost. It balances state-of-the-art reasoning and multimodal performance with 8× lower cost compared to traditional large models, making it suitable for scalable deployments across professional and industrial use cases.\n\nThe model excels in domains such as coding, STEM reasoning, and enterprise adaptation. 
It supports hybrid, on-prem, and in-VPC deployments and is optimized for integration into custom workflows. Mistral Medium 3.1 offers competitive accuracy relative to larger models like Claude Sonnet 3.5/3.7, Llama 4 Maverick, and Command R+, while maintaining broad compatibility across cloud environments.", + "context_length": 131072, + "architecture": { + "modality": "text+image-\u003Etext", + "input_modalities": ["text", "image"], + "output_modalities": ["text"], + "tokenizer": "Mistral", + "instruct_type": null + }, + "pricing": { + "prompt": "0.0000004", + "completion": "0.000002", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 131072, + "max_completion_tokens": null, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "max_tokens", + "presence_penalty", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_p" + ], + "default_parameters": { + "temperature": 0.3 + } + }, + { + "id": "baidu/ernie-4.5-21b-a3b", + "canonical_slug": "baidu/ernie-4.5-21b-a3b", + "hugging_face_id": "baidu/ERNIE-4.5-21B-A3B-PT", + "name": "Baidu: ERNIE 4.5 21B A3B", + "created": 1755034167, + "description": "A sophisticated text-based Mixture-of-Experts (MoE) model featuring 21B total parameters with 3B activated per token, delivering exceptional multimodal understanding and generation through heterogeneous MoE structures and modality-isolated routing. Supporting an extensive 131K token context length, the model achieves efficient inference via multi-expert parallel collaboration and quantization, while advanced post-training techniques including SFT, DPO, and UPO ensure optimized performance across diverse applications with specialized routing and balancing losses for superior task handling.", + "context_length": 120000, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Other", + "instruct_type": null + }, + "pricing": { + "prompt": "0.00000007", + "completion": "0.00000028", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 120000, + "max_completion_tokens": 8000, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "max_tokens", + "presence_penalty", + "repetition_penalty", + "seed", + "stop", + "temperature", + "tool_choice", + "tools", + "top_k", + "top_p" + ], + "default_parameters": { + "temperature": 0.8, + "top_p": 0.8, + "frequency_penalty": null + } + }, + { + "id": "baidu/ernie-4.5-vl-28b-a3b", + "canonical_slug": "baidu/ernie-4.5-vl-28b-a3b", + "hugging_face_id": "baidu/ERNIE-4.5-VL-28B-A3B-PT", + "name": "Baidu: ERNIE 4.5 VL 28B A3B", + "created": 1755032836, + "description": "A powerful multimodal Mixture-of-Experts chat model featuring 28B total parameters with 3B activated per token, delivering exceptional text and vision understanding through its innovative heterogeneous MoE structure with modality-isolated routing. 
Built with scaling-efficient infrastructure for high-throughput training and inference, the model leverages advanced post-training techniques including SFT, DPO, and UPO for optimized performance, while supporting an impressive 131K context length and RLVR alignment for superior cross-modal reasoning and generation capabilities.", + "context_length": 30000, + "architecture": { + "modality": "text+image-\u003Etext", + "input_modalities": ["text", "image"], + "output_modalities": ["text"], + "tokenizer": "Other", + "instruct_type": null + }, + "pricing": { + "prompt": "0.00000014", + "completion": "0.00000056", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 30000, + "max_completion_tokens": 8000, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "include_reasoning", + "max_tokens", + "presence_penalty", + "reasoning", + "repetition_penalty", + "seed", + "stop", + "temperature", + "tool_choice", + "tools", + "top_k", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "z-ai/glm-4.5v", + "canonical_slug": "z-ai/glm-4.5v", + "hugging_face_id": "zai-org/GLM-4.5V", + "name": "Z.AI: GLM 4.5V", + "created": 1754922288, + "description": "GLM-4.5V is a vision-language foundation model for multimodal agent applications. Built on a Mixture-of-Experts (MoE) architecture with 106B parameters and 12B activated parameters, it achieves state-of-the-art results in video understanding, image Q&A, OCR, and document parsing, with strong gains in front-end web coding, grounding, and spatial reasoning. It offers a hybrid inference mode: a \"thinking mode\" for deep reasoning and a \"non-thinking mode\" for fast responses. Reasoning behavior can be toggled via the `reasoning` `enabled` boolean. [Learn more in our docs](https://openrouter.ai/docs/use-cases/reasoning-tokens#enable-reasoning-with-default-config)", + "context_length": 65536, + "architecture": { + "modality": "text+image-\u003Etext", + "input_modalities": ["text", "image"], + "output_modalities": ["text"], + "tokenizer": "Other", + "instruct_type": null + }, + "pricing": { + "prompt": "0.0000006", + "completion": "0.0000018", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0", + "input_cache_read": "0.00000011" + }, + "top_provider": { + "context_length": 65536, + "max_completion_tokens": 16384, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "include_reasoning", + "logit_bias", + "max_tokens", + "min_p", + "presence_penalty", + "reasoning", + "repetition_penalty", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_k", + "top_p" + ], + "default_parameters": { + "temperature": 0.75, + "top_p": null, + "frequency_penalty": null + } + }, + { + "id": "ai21/jamba-mini-1.7", + "canonical_slug": "ai21/jamba-mini-1.7", + "hugging_face_id": "ai21labs/AI21-Jamba-Mini-1.7", + "name": "AI21: Jamba Mini 1.7", + "created": 1754670601, + "description": "Jamba Mini 1.7 is a compact and efficient member of the Jamba open model family, incorporating key improvements in grounding and instruction-following while maintaining the benefits of the SSM-Transformer hybrid architecture and 256K context window. 
Despite its compact size, it delivers accurate, contextually grounded responses and improved steerability.", + "context_length": 256000, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Other", + "instruct_type": null + }, + "pricing": { + "prompt": "0.0000002", + "completion": "0.0000004", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 256000, + "max_completion_tokens": 4096, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "max_tokens", + "response_format", + "stop", + "temperature", + "tool_choice", + "tools", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "ai21/jamba-large-1.7", + "canonical_slug": "ai21/jamba-large-1.7", + "hugging_face_id": "ai21labs/AI21-Jamba-Large-1.7", + "name": "AI21: Jamba Large 1.7", + "created": 1754669020, + "description": "Jamba Large 1.7 is the latest model in the Jamba open family, offering improvements in grounding, instruction-following, and overall efficiency. Built on a hybrid SSM-Transformer architecture with a 256K context window, it delivers more accurate, contextually grounded responses and better steerability than previous versions.", + "context_length": 256000, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Other", + "instruct_type": null + }, + "pricing": { + "prompt": "0.000002", + "completion": "0.000008", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 256000, + "max_completion_tokens": 4096, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "max_tokens", + "response_format", + "stop", + "temperature", + "tool_choice", + "tools", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "openai/gpt-5-chat", + "canonical_slug": "openai/gpt-5-chat-2025-08-07", + "hugging_face_id": "", + "name": "OpenAI: GPT-5 Chat", + "created": 1754587837, + "description": "GPT-5 Chat is designed for advanced, natural, multimodal, and context-aware conversations for enterprise applications.", + "context_length": 128000, + "architecture": { + "modality": "text+image-\u003Etext", + "input_modalities": ["file", "image", "text"], + "output_modalities": ["text"], + "tokenizer": "GPT", + "instruct_type": null + }, + "pricing": { + "prompt": "0.00000125", + "completion": "0.00001", + "request": "0", + "image": "0", + "web_search": "0.01", + "internal_reasoning": "0", + "input_cache_read": "0.000000125" + }, + "top_provider": { + "context_length": 128000, + "max_completion_tokens": 16384, + "is_moderated": true + }, + "per_request_limits": null, + "supported_parameters": [ + "max_tokens", + "response_format", + "seed", + "structured_outputs" + ], + "default_parameters": {} + }, + { + "id": "openai/gpt-5", + "canonical_slug": "openai/gpt-5-2025-08-07", + "hugging_face_id": "", + "name": "OpenAI: GPT-5", + "created": 1754587413, + "description": "GPT-5 is OpenAI’s most advanced model, offering major improvements in reasoning, code quality, and user experience. It is optimized for complex tasks that require step-by-step reasoning, instruction following, and accuracy in high-stakes use cases. 
It supports test-time routing features and advanced prompt understanding, including user-specified intent like \"think hard about this.\" Improvements include reductions in hallucination, sycophancy, and better performance in coding, writing, and health-related tasks.", + "context_length": 400000, + "architecture": { + "modality": "text+image-\u003Etext", + "input_modalities": ["text", "image", "file"], + "output_modalities": ["text"], + "tokenizer": "GPT", + "instruct_type": null + }, + "pricing": { + "prompt": "0.00000125", + "completion": "0.00001", + "request": "0", + "image": "0", + "web_search": "0.01", + "internal_reasoning": "0", + "input_cache_read": "0.000000125" + }, + "top_provider": { + "context_length": 400000, + "max_completion_tokens": 128000, + "is_moderated": true + }, + "per_request_limits": null, + "supported_parameters": [ + "include_reasoning", + "max_tokens", + "reasoning", + "response_format", + "seed", + "structured_outputs", + "tool_choice", + "tools" + ], + "default_parameters": { + "temperature": null, + "top_p": null, + "frequency_penalty": null + } + }, + { + "id": "openai/gpt-5-mini", + "canonical_slug": "openai/gpt-5-mini-2025-08-07", + "hugging_face_id": "", + "name": "OpenAI: GPT-5 Mini", + "created": 1754587407, + "description": "GPT-5 Mini is a compact version of GPT-5, designed to handle lighter-weight reasoning tasks. It provides the same instruction-following and safety-tuning benefits as GPT-5, but with reduced latency and cost. GPT-5 Mini is the successor to OpenAI's o4-mini model.", + "context_length": 400000, + "architecture": { + "modality": "text+image-\u003Etext", + "input_modalities": ["text", "image", "file"], + "output_modalities": ["text"], + "tokenizer": "GPT", + "instruct_type": null + }, + "pricing": { + "prompt": "0.00000025", + "completion": "0.000002", + "request": "0", + "image": "0", + "web_search": "0.01", + "internal_reasoning": "0", + "input_cache_read": "0.000000025" + }, + "top_provider": { + "context_length": 400000, + "max_completion_tokens": 128000, + "is_moderated": true + }, + "per_request_limits": null, + "supported_parameters": [ + "include_reasoning", + "max_tokens", + "reasoning", + "response_format", + "seed", + "structured_outputs", + "tool_choice", + "tools" + ], + "default_parameters": {} + }, + { + "id": "openai/gpt-5-nano", + "canonical_slug": "openai/gpt-5-nano-2025-08-07", + "hugging_face_id": "", + "name": "OpenAI: GPT-5 Nano", + "created": 1754587402, + "description": "GPT-5-Nano is the smallest and fastest variant in the GPT-5 system, optimized for developer tools, rapid interactions, and ultra-low latency environments. While limited in reasoning depth compared to its larger counterparts, it retains key instruction-following and safety features. 
It is the successor to GPT-4.1-nano and offers a lightweight option for cost-sensitive or real-time applications.", + "context_length": 400000, + "architecture": { + "modality": "text+image-\u003Etext", + "input_modalities": ["text", "image", "file"], + "output_modalities": ["text"], + "tokenizer": "GPT", + "instruct_type": null + }, + "pricing": { + "prompt": "0.00000005", + "completion": "0.0000004", + "request": "0", + "image": "0", + "web_search": "0.01", + "internal_reasoning": "0", + "input_cache_read": "0.000000005" + }, + "top_provider": { + "context_length": 400000, + "max_completion_tokens": 128000, + "is_moderated": true + }, + "per_request_limits": null, + "supported_parameters": [ + "include_reasoning", + "max_tokens", + "reasoning", + "response_format", + "seed", + "structured_outputs", + "tool_choice", + "tools" + ], + "default_parameters": {} + }, + { + "id": "openai/gpt-oss-120b", + "canonical_slug": "openai/gpt-oss-120b", + "hugging_face_id": "openai/gpt-oss-120b", + "name": "OpenAI: gpt-oss-120b", + "created": 1754414231, + "description": "gpt-oss-120b is an open-weight, 117B-parameter Mixture-of-Experts (MoE) language model from OpenAI designed for high-reasoning, agentic, and general-purpose production use cases. It activates 5.1B parameters per forward pass and is optimized to run on a single H100 GPU with native MXFP4 quantization. The model supports configurable reasoning depth, full chain-of-thought access, and native tool use, including function calling, browsing, and structured output generation.", + "context_length": 131072, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "GPT", + "instruct_type": null + }, + "pricing": { + "prompt": "0.00000004", + "completion": "0.0000004", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 131072, + "max_completion_tokens": 131072, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "include_reasoning", + "logit_bias", + "logprobs", + "max_tokens", + "min_p", + "presence_penalty", + "reasoning", + "repetition_penalty", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_k", + "top_logprobs", + "top_p" + ], + "default_parameters": { + "temperature": null, + "top_p": null, + "frequency_penalty": null + } + }, + { + "id": "openai/gpt-oss-120b:exacto", + "canonical_slug": "openai/gpt-oss-120b", + "hugging_face_id": "openai/gpt-oss-120b", + "name": "OpenAI: gpt-oss-120b (exacto)", + "created": 1754414231, + "description": "gpt-oss-120b is an open-weight, 117B-parameter Mixture-of-Experts (MoE) language model from OpenAI designed for high-reasoning, agentic, and general-purpose production use cases. It activates 5.1B parameters per forward pass and is optimized to run on a single H100 GPU with native MXFP4 quantization. 
The model supports configurable reasoning depth, full chain-of-thought access, and native tool use, including function calling, browsing, and structured output generation.", + "context_length": 131072, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "GPT", + "instruct_type": null + }, + "pricing": { + "prompt": "0.00000005", + "completion": "0.00000024", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 131072, + "max_completion_tokens": null, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "include_reasoning", + "max_tokens", + "min_p", + "presence_penalty", + "reasoning", + "repetition_penalty", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_k", + "top_p" + ], + "default_parameters": { + "temperature": null, + "top_p": null, + "frequency_penalty": null + } + }, + { + "id": "openai/gpt-oss-20b:free", + "canonical_slug": "openai/gpt-oss-20b", + "hugging_face_id": "openai/gpt-oss-20b", + "name": "OpenAI: gpt-oss-20b (free)", + "created": 1754414229, + "description": "gpt-oss-20b is an open-weight 21B parameter model released by OpenAI under the Apache 2.0 license. It uses a Mixture-of-Experts (MoE) architecture with 3.6B active parameters per forward pass, optimized for lower-latency inference and deployability on consumer or single-GPU hardware. The model is trained in OpenAI’s Harmony response format and supports reasoning level configuration, fine-tuning, and agentic capabilities including function calling, tool use, and structured outputs.", + "context_length": 131072, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "GPT", + "instruct_type": null + }, + "pricing": { + "prompt": "0", + "completion": "0", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 131072, + "max_completion_tokens": 131072, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "include_reasoning", + "max_tokens", + "presence_penalty", + "reasoning", + "repetition_penalty", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_k", + "top_p" + ], + "default_parameters": { + "temperature": null, + "top_p": null, + "frequency_penalty": null + } + }, + { + "id": "openai/gpt-oss-20b", + "canonical_slug": "openai/gpt-oss-20b", + "hugging_face_id": "openai/gpt-oss-20b", + "name": "OpenAI: gpt-oss-20b", + "created": 1754414229, + "description": "gpt-oss-20b is an open-weight 21B parameter model released by OpenAI under the Apache 2.0 license. It uses a Mixture-of-Experts (MoE) architecture with 3.6B active parameters per forward pass, optimized for lower-latency inference and deployability on consumer or single-GPU hardware. 
The model is trained in OpenAI’s Harmony response format and supports reasoning level configuration, fine-tuning, and agentic capabilities including function calling, tool use, and structured outputs.", + "context_length": 131072, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "GPT", + "instruct_type": null + }, + "pricing": { + "prompt": "0.00000003", + "completion": "0.00000014", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 131072, + "max_completion_tokens": null, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "include_reasoning", + "logit_bias", + "max_tokens", + "min_p", + "presence_penalty", + "reasoning", + "repetition_penalty", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_k", + "top_p" + ], + "default_parameters": { + "temperature": null, + "top_p": null, + "frequency_penalty": null + } + }, + { + "id": "anthropic/claude-opus-4.1", + "canonical_slug": "anthropic/claude-4.1-opus-20250805", + "hugging_face_id": "", + "name": "Anthropic: Claude Opus 4.1", + "created": 1754411591, + "description": "Claude Opus 4.1 is an updated version of Anthropic’s flagship model, offering improved performance in coding, reasoning, and agentic tasks. It achieves 74.5% on SWE-bench Verified and shows notable gains in multi-file code refactoring, debugging precision, and detail-oriented reasoning. The model supports extended thinking up to 64K tokens and is optimized for tasks involving research, data analysis, and tool-assisted reasoning.", + "context_length": 200000, + "architecture": { + "modality": "text+image-\u003Etext", + "input_modalities": ["image", "text", "file"], + "output_modalities": ["text"], + "tokenizer": "Claude", + "instruct_type": null + }, + "pricing": { + "prompt": "0.000015", + "completion": "0.000075", + "request": "0", + "image": "0.024", + "web_search": "0", + "internal_reasoning": "0", + "input_cache_read": "0.0000015", + "input_cache_write": "0.00001875" + }, + "top_provider": { + "context_length": 200000, + "max_completion_tokens": 32000, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "include_reasoning", + "max_tokens", + "reasoning", + "response_format", + "stop", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_k", + "top_p" + ], + "default_parameters": { + "temperature": null, + "top_p": null, + "frequency_penalty": null + } + }, + { + "id": "mistralai/codestral-2508", + "canonical_slug": "mistralai/codestral-2508", + "hugging_face_id": "", + "name": "Mistral: Codestral 2508", + "created": 1754079630, + "description": "Mistral's cutting-edge language model for coding released end of July 2025. 
Codestral specializes in low-latency, high-frequency tasks such as fill-in-the-middle (FIM), code correction and test generation.\n\n[Blog Post](https://mistral.ai/news/codestral-25-08)", + "context_length": 256000, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Mistral", + "instruct_type": null + }, + "pricing": { + "prompt": "0.0000003", + "completion": "0.0000009", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 256000, + "max_completion_tokens": null, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "max_tokens", + "presence_penalty", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_p" + ], + "default_parameters": { + "temperature": 0.3 + } + }, + { + "id": "qwen/qwen3-coder-30b-a3b-instruct", + "canonical_slug": "qwen/qwen3-coder-30b-a3b-instruct", + "hugging_face_id": "Qwen/Qwen3-Coder-30B-A3B-Instruct", + "name": "Qwen: Qwen3 Coder 30B A3B Instruct", + "created": 1753972379, + "description": "Qwen3-Coder-30B-A3B-Instruct is a 30.5B parameter Mixture-of-Experts (MoE) model with 128 experts (8 active per forward pass), designed for advanced code generation, repository-scale understanding, and agentic tool use. Built on the Qwen3 architecture, it supports a native context length of 256K tokens (extendable to 1M with Yarn) and performs strongly in tasks involving function calls, browser use, and structured code completion.\n\nThis model is optimized for instruction-following without “thinking mode”, and integrates well with OpenAI-compatible tool-use formats. ", + "context_length": 262144, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Qwen3", + "instruct_type": null + }, + "pricing": { + "prompt": "0.00000006", + "completion": "0.00000025", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 262144, + "max_completion_tokens": 262144, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "max_tokens", + "min_p", + "presence_penalty", + "repetition_penalty", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_k", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "qwen/qwen3-30b-a3b-instruct-2507", + "canonical_slug": "qwen/qwen3-30b-a3b-instruct-2507", + "hugging_face_id": "Qwen/Qwen3-30B-A3B-Instruct-2507", + "name": "Qwen: Qwen3 30B A3B Instruct 2507", + "created": 1753806965, + "description": "Qwen3-30B-A3B-Instruct-2507 is a 30.5B-parameter mixture-of-experts language model from Qwen, with 3.3B active parameters per inference. It operates in non-thinking mode and is designed for high-quality instruction following, multilingual understanding, and agentic tool use. Post-trained on instruction data, it demonstrates competitive performance across reasoning (AIME, ZebraLogic), coding (MultiPL-E, LiveCodeBench), and alignment (IFEval, WritingBench) benchmarks. 
It outperforms its non-instruct variant on subjective and open-ended tasks while retaining strong factual and coding performance.", + "context_length": 262144, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Qwen3", + "instruct_type": null + }, + "pricing": { + "prompt": "0.00000008", + "completion": "0.00000033", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 262144, + "max_completion_tokens": 262144, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "max_tokens", + "presence_penalty", + "repetition_penalty", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_k", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "z-ai/glm-4.5", + "canonical_slug": "z-ai/glm-4.5", + "hugging_face_id": "zai-org/GLM-4.5", + "name": "Z.AI: GLM 4.5", + "created": 1753471347, + "description": "GLM-4.5 is our latest flagship foundation model, purpose-built for agent-based applications. It leverages a Mixture-of-Experts (MoE) architecture and supports a context length of up to 128k tokens. GLM-4.5 delivers significantly enhanced capabilities in reasoning, code generation, and agent alignment. It supports a hybrid inference mode with two options, a \"thinking mode\" designed for complex reasoning and tool use, and a \"non-thinking mode\" optimized for instant responses. Users can control the reasoning behaviour with the `reasoning` `enabled` boolean. [Learn more in our docs](https://openrouter.ai/docs/use-cases/reasoning-tokens#enable-reasoning-with-default-config)", + "context_length": 131072, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Other", + "instruct_type": null + }, + "pricing": { + "prompt": "0.00000035", + "completion": "0.0000015", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 131072, + "max_completion_tokens": 131072, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "include_reasoning", + "logit_bias", + "logprobs", + "max_tokens", + "min_p", + "presence_penalty", + "reasoning", + "repetition_penalty", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_a", + "top_k", + "top_logprobs", + "top_p" + ], + "default_parameters": { + "temperature": 0.75, + "top_p": null, + "frequency_penalty": null + } + }, + { + "id": "z-ai/glm-4.5-air:free", + "canonical_slug": "z-ai/glm-4.5-air", + "hugging_face_id": "zai-org/GLM-4.5-Air", + "name": "Z.AI: GLM 4.5 Air (free)", + "created": 1753471258, + "description": "GLM-4.5-Air is the lightweight variant of our latest flagship model family, also purpose-built for agent-centric applications. Like GLM-4.5, it adopts the Mixture-of-Experts (MoE) architecture but with a more compact parameter size. GLM-4.5-Air also supports hybrid inference modes, offering a \"thinking mode\" for advanced reasoning and tool use, and a \"non-thinking mode\" for real-time interaction. Users can control the reasoning behaviour with the `reasoning` `enabled` boolean. 
[Learn more in our docs](https://openrouter.ai/docs/use-cases/reasoning-tokens#enable-reasoning-with-default-config)", + "context_length": 131072, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Other", + "instruct_type": null + }, + "pricing": { + "prompt": "0", + "completion": "0", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 131072, + "max_completion_tokens": 131072, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "include_reasoning", + "max_tokens", + "presence_penalty", + "reasoning", + "repetition_penalty", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_k", + "top_p" + ], + "default_parameters": { + "temperature": 0.75, + "top_p": null, + "frequency_penalty": null + } + }, + { + "id": "z-ai/glm-4.5-air", + "canonical_slug": "z-ai/glm-4.5-air", + "hugging_face_id": "zai-org/GLM-4.5-Air", + "name": "Z.AI: GLM 4.5 Air", + "created": 1753471258, + "description": "GLM-4.5-Air is the lightweight variant of our latest flagship model family, also purpose-built for agent-centric applications. Like GLM-4.5, it adopts the Mixture-of-Experts (MoE) architecture but with a more compact parameter size. GLM-4.5-Air also supports hybrid inference modes, offering a \"thinking mode\" for advanced reasoning and tool use, and a \"non-thinking mode\" for real-time interaction. Users can control the reasoning behaviour with the `reasoning` `enabled` boolean. [Learn more in our docs](https://openrouter.ai/docs/use-cases/reasoning-tokens#enable-reasoning-with-default-config)", + "context_length": 131072, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Other", + "instruct_type": null + }, + "pricing": { + "prompt": "0.00000013", + "completion": "0.00000085", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0", + "input_cache_read": "0" + }, + "top_provider": { + "context_length": 131072, + "max_completion_tokens": 98304, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "include_reasoning", + "max_tokens", + "presence_penalty", + "reasoning", + "repetition_penalty", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_k", + "top_p" + ], + "default_parameters": { + "temperature": 0.75, + "top_p": null, + "frequency_penalty": null + } + }, + { + "id": "qwen/qwen3-235b-a22b-thinking-2507", + "canonical_slug": "qwen/qwen3-235b-a22b-thinking-2507", + "hugging_face_id": "Qwen/Qwen3-235B-A22B-Thinking-2507", + "name": "Qwen: Qwen3 235B A22B Thinking 2507", + "created": 1753449557, + "description": "Qwen3-235B-A22B-Thinking-2507 is a high-performance, open-weight Mixture-of-Experts (MoE) language model optimized for complex reasoning tasks. It activates 22B of its 235B parameters per forward pass and natively supports up to 262,144 tokens of context. This \"thinking-only\" variant enhances structured logical reasoning, mathematics, science, and long-form generation, showing strong benchmark performance across AIME, SuperGPQA, LiveCodeBench, and MMLU-Redux. 
It enforces a special reasoning mode (\u003C/think\u003E) and is designed for high-token outputs (up to 81,920 tokens) in challenging domains.\n\nThe model is instruction-tuned and excels at step-by-step reasoning, tool use, agentic workflows, and multilingual tasks. This release represents the most capable open-source variant in the Qwen3-235B series, surpassing many closed models in structured reasoning use cases.", + "context_length": 262144, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Qwen3", + "instruct_type": "qwen3" + }, + "pricing": { + "prompt": "0.00000011", + "completion": "0.0000006", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 262144, + "max_completion_tokens": 262144, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "include_reasoning", + "logit_bias", + "max_tokens", + "min_p", + "presence_penalty", + "reasoning", + "repetition_penalty", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_k", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "z-ai/glm-4-32b", + "canonical_slug": "z-ai/glm-4-32b-0414", + "hugging_face_id": "", + "name": "Z.AI: GLM 4 32B ", + "created": 1753376617, + "description": "GLM 4 32B is a cost-effective foundation language model.\n\nIt can efficiently perform complex tasks and has significantly enhanced capabilities in tool use, online search, and code-related intelligent tasks.\n\nIt is made by the same lab behind the thudm models.", + "context_length": 128000, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Other", + "instruct_type": null + }, + "pricing": { + "prompt": "0.0000001", + "completion": "0.0000001", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 128000, + "max_completion_tokens": null, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "max_tokens", + "temperature", + "tool_choice", + "tools", + "top_p" + ], + "default_parameters": { + "temperature": 0.75, + "top_p": null, + "frequency_penalty": null + } + }, + { + "id": "qwen/qwen3-coder:free", + "canonical_slug": "qwen/qwen3-coder-480b-a35b-07-25", + "hugging_face_id": "Qwen/Qwen3-Coder-480B-A35B-Instruct", + "name": "Qwen: Qwen3 Coder 480B A35B (free)", + "created": 1753230546, + "description": "Qwen3-Coder-480B-A35B-Instruct is a Mixture-of-Experts (MoE) code generation model developed by the Qwen team. It is optimized for agentic coding tasks such as function calling, tool use, and long-context reasoning over repositories. The model features 480 billion total parameters, with 35 billion active per forward pass (8 out of 160 experts).\n\nPricing for the Alibaba endpoints varies by context length. 
Once a request is greater than 128k input tokens, the higher pricing is used.", + "context_length": 262000, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Qwen3", + "instruct_type": null + }, + "pricing": { + "prompt": "0", + "completion": "0", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 262000, + "max_completion_tokens": 262000, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "max_tokens", + "presence_penalty", + "repetition_penalty", + "seed", + "stop", + "temperature", + "tool_choice", + "tools", + "top_k", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "qwen/qwen3-coder", + "canonical_slug": "qwen/qwen3-coder-480b-a35b-07-25", + "hugging_face_id": "Qwen/Qwen3-Coder-480B-A35B-Instruct", + "name": "Qwen: Qwen3 Coder 480B A35B", + "created": 1753230546, + "description": "Qwen3-Coder-480B-A35B-Instruct is a Mixture-of-Experts (MoE) code generation model developed by the Qwen team. It is optimized for agentic coding tasks such as function calling, tool use, and long-context reasoning over repositories. The model features 480 billion total parameters, with 35 billion active per forward pass (8 out of 160 experts).\n\nPricing for the Alibaba endpoints varies by context length. Once a request is greater than 128k input tokens, the higher pricing is used.", + "context_length": 262144, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Qwen3", + "instruct_type": null + }, + "pricing": { + "prompt": "0.00000022", + "completion": "0.00000095", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 262144, + "max_completion_tokens": 262144, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "logit_bias", + "logprobs", + "max_tokens", + "min_p", + "presence_penalty", + "reasoning", + "repetition_penalty", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_k", + "top_logprobs", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "qwen/qwen3-coder:exacto", + "canonical_slug": "qwen/qwen3-coder-480b-a35b-07-25", + "hugging_face_id": "Qwen/Qwen3-Coder-480B-A35B-Instruct", + "name": "Qwen: Qwen3 Coder 480B A35B (exacto)", + "created": 1753230546, + "description": "Qwen3-Coder-480B-A35B-Instruct is a Mixture-of-Experts (MoE) code generation model developed by the Qwen team. It is optimized for agentic coding tasks such as function calling, tool use, and long-context reasoning over repositories. The model features 480 billion total parameters, with 35 billion active per forward pass (8 out of 160 experts).\n\nPricing for the Alibaba endpoints varies by context length. 
Once a request is greater than 128k input tokens, the higher pricing is used.", + "context_length": 262144, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Qwen3", + "instruct_type": null + }, + "pricing": { + "prompt": "0.00000038", + "completion": "0.00000153", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 262144, + "max_completion_tokens": 262144, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "max_tokens", + "presence_penalty", + "reasoning", + "repetition_penalty", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_k", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "bytedance/ui-tars-1.5-7b", + "canonical_slug": "bytedance/ui-tars-1.5-7b", + "hugging_face_id": "ByteDance-Seed/UI-TARS-1.5-7B", + "name": "ByteDance: UI-TARS 7B ", + "created": 1753205056, + "description": "UI-TARS-1.5 is a multimodal vision-language agent optimized for GUI-based environments, including desktop interfaces, web browsers, mobile systems, and games. Built by ByteDance, it builds upon the UI-TARS framework with reinforcement learning-based reasoning, enabling robust action planning and execution across virtual interfaces.\n\nThis model achieves state-of-the-art results on a range of interactive and grounding benchmarks, including OSworld, WebVoyager, AndroidWorld, and ScreenSpot. It also demonstrates perfect task completion across diverse Poki games and outperforms prior models in Minecraft agent tasks. UI-TARS-1.5 supports thought decomposition during inference and shows strong scaling across variants, with the 1.5 version notably exceeding the performance of earlier 72B and 7B checkpoints.", + "context_length": 128000, + "architecture": { + "modality": "text+image-\u003Etext", + "input_modalities": ["image", "text"], + "output_modalities": ["text"], + "tokenizer": "Other", + "instruct_type": null + }, + "pricing": { + "prompt": "0.0000001", + "completion": "0.0000002", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 128000, + "max_completion_tokens": 2048, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "logit_bias", + "max_tokens", + "min_p", + "presence_penalty", + "repetition_penalty", + "seed", + "stop", + "temperature", + "top_k", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "google/gemini-2.5-flash-lite", + "canonical_slug": "google/gemini-2.5-flash-lite", + "hugging_face_id": "", + "name": "Google: Gemini 2.5 Flash Lite", + "created": 1753200276, + "description": "Gemini 2.5 Flash-Lite is a lightweight reasoning model in the Gemini 2.5 family, optimized for ultra-low latency and cost efficiency. It offers improved throughput, faster token generation, and better performance across common benchmarks compared to earlier Flash models. By default, \"thinking\" (i.e. multi-pass reasoning) is disabled to prioritize speed, but developers can enable it via the [Reasoning API parameter](https://openrouter.ai/docs/use-cases/reasoning-tokens) to selectively trade off cost for intelligence. 
", + "context_length": 1048576, + "architecture": { + "modality": "text+image-\u003Etext", + "input_modalities": ["text", "image", "file", "audio", "video"], + "output_modalities": ["text"], + "tokenizer": "Gemini", + "instruct_type": null + }, + "pricing": { + "prompt": "0.0000001", + "completion": "0.0000004", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0", + "input_cache_read": "0.00000001", + "input_cache_write": "0.0000001833" + }, + "top_provider": { + "context_length": 1048576, + "max_completion_tokens": 65535, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "include_reasoning", + "max_tokens", + "reasoning", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_p" + ], + "default_parameters": { + "temperature": null, + "top_p": null, + "frequency_penalty": null + } + }, + { + "id": "qwen/qwen3-235b-a22b-2507", + "canonical_slug": "qwen/qwen3-235b-a22b-07-25", + "hugging_face_id": "Qwen/Qwen3-235B-A22B-Instruct-2507", + "name": "Qwen: Qwen3 235B A22B Instruct 2507", + "created": 1753119555, + "description": "Qwen3-235B-A22B-Instruct-2507 is a multilingual, instruction-tuned mixture-of-experts language model based on the Qwen3-235B architecture, with 22B active parameters per forward pass. It is optimized for general-purpose text generation, including instruction following, logical reasoning, math, code, and tool usage. The model supports a native 262K context length and does not implement \"thinking mode\" (\u003Cthink\u003E blocks).\n\nCompared to its base variant, this version delivers significant gains in knowledge coverage, long-context reasoning, coding benchmarks, and alignment with open-ended tasks. It is particularly strong on multilingual understanding, math reasoning (e.g., AIME, HMMT), and alignment evaluations like Arena-Hard and WritingBench.", + "context_length": 262144, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Qwen3", + "instruct_type": null + }, + "pricing": { + "prompt": "0.00000008", + "completion": "0.00000055", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 262144, + "max_completion_tokens": 262144, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "logit_bias", + "logprobs", + "max_tokens", + "min_p", + "presence_penalty", + "reasoning", + "repetition_penalty", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_k", + "top_logprobs", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "switchpoint/router", + "canonical_slug": "switchpoint/router", + "hugging_face_id": "", + "name": "Switchpoint Router", + "created": 1752272899, + "description": "Switchpoint AI's router instantly analyzes your request and directs it to the optimal AI from an ever-evolving library. \n\nAs the world of LLMs advances, our router gets smarter, ensuring you always benefit from the industry's newest models without changing your workflow.\n\nThis model is configured for a simple, flat rate per response here on OpenRouter. 
It's powered by the full routing engine from [Switchpoint AI](https://www.switchpoint.dev).", + "context_length": 131072, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Other", + "instruct_type": null + }, + "pricing": { + "prompt": "0.00000085", + "completion": "0.0000034", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 131072, + "max_completion_tokens": null, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "include_reasoning", + "max_tokens", + "reasoning", + "seed", + "stop", + "temperature", + "top_k", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "moonshotai/kimi-k2:free", + "canonical_slug": "moonshotai/kimi-k2", + "hugging_face_id": "moonshotai/Kimi-K2-Instruct", + "name": "MoonshotAI: Kimi K2 0711 (free)", + "created": 1752263252, + "description": "Kimi K2 Instruct is a large-scale Mixture-of-Experts (MoE) language model developed by Moonshot AI, featuring 1 trillion total parameters with 32 billion active per forward pass. It is optimized for agentic capabilities, including advanced tool use, reasoning, and code synthesis. Kimi K2 excels across a broad range of benchmarks, particularly in coding (LiveCodeBench, SWE-bench), reasoning (ZebraLogic, GPQA), and tool-use (Tau2, AceBench) tasks. It supports long-context inference up to 128K tokens and is designed with a novel training stack that includes the MuonClip optimizer for stable large-scale MoE training.", + "context_length": 32768, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Other", + "instruct_type": null + }, + "pricing": { + "prompt": "0", + "completion": "0", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 32768, + "max_completion_tokens": null, + "is_moderated": true + }, + "per_request_limits": null, + "supported_parameters": ["max_tokens", "seed", "stop", "temperature"], + "default_parameters": {} + }, + { + "id": "moonshotai/kimi-k2", + "canonical_slug": "moonshotai/kimi-k2", + "hugging_face_id": "moonshotai/Kimi-K2-Instruct", + "name": "MoonshotAI: Kimi K2 0711", + "created": 1752263252, + "description": "Kimi K2 Instruct is a large-scale Mixture-of-Experts (MoE) language model developed by Moonshot AI, featuring 1 trillion total parameters with 32 billion active per forward pass. It is optimized for agentic capabilities, including advanced tool use, reasoning, and code synthesis. Kimi K2 excels across a broad range of benchmarks, particularly in coding (LiveCodeBench, SWE-bench), reasoning (ZebraLogic, GPQA), and tool-use (Tau2, AceBench) tasks. 
It supports long-context inference up to 128K tokens and is designed with a novel training stack that includes the MuonClip optimizer for stable large-scale MoE training.", + "context_length": 131072, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Other", + "instruct_type": null + }, + "pricing": { + "prompt": "0.0000005", + "completion": "0.0000024", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 131072, + "max_completion_tokens": null, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "logit_bias", + "max_tokens", + "min_p", + "presence_penalty", + "repetition_penalty", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_k", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "thudm/glm-4.1v-9b-thinking", + "canonical_slug": "thudm/glm-4.1v-9b-thinking", + "hugging_face_id": "THUDM/GLM-4.1V-9B-Thinking", + "name": "THUDM: GLM 4.1V 9B Thinking", + "created": 1752244385, + "description": "GLM-4.1V-9B-Thinking is a 9B parameter vision-language model developed by THUDM, based on the GLM-4-9B foundation. It introduces a reasoning-centric \"thinking paradigm\" enhanced with reinforcement learning to improve multimodal reasoning, long-context understanding (up to 64K tokens), and complex problem solving. It achieves state-of-the-art performance among models in its class, outperforming even larger models like Qwen-2.5-VL-72B on a majority of benchmark tasks. ", + "context_length": 65536, + "architecture": { + "modality": "text+image-\u003Etext", + "input_modalities": ["image", "text"], + "output_modalities": ["text"], + "tokenizer": "Other", + "instruct_type": null + }, + "pricing": { + "prompt": "0.000000035", + "completion": "0.000000138", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 65536, + "max_completion_tokens": 8000, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "include_reasoning", + "max_tokens", + "presence_penalty", + "reasoning", + "repetition_penalty", + "seed", + "stop", + "temperature", + "top_k", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "mistralai/devstral-medium", + "canonical_slug": "mistralai/devstral-medium-2507", + "hugging_face_id": "", + "name": "Mistral: Devstral Medium", + "created": 1752161321, + "description": "Devstral Medium is a high-performance code generation and agentic reasoning model developed jointly by Mistral AI and All Hands AI. Positioned as a step up from Devstral Small, it achieves 61.6% on SWE-Bench Verified, placing it ahead of Gemini 2.5 Pro and GPT-4.1 in code-related tasks, at a fraction of the cost. 
It is designed for generalization across prompt styles and tool use in code agents and frameworks.\n\nDevstral Medium is available via API only (not open-weight), and supports enterprise deployment on private infrastructure, with optional fine-tuning capabilities.", + "context_length": 131072, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Mistral", + "instruct_type": null + }, + "pricing": { + "prompt": "0.0000004", + "completion": "0.000002", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 131072, + "max_completion_tokens": null, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "max_tokens", + "presence_penalty", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_p" + ], + "default_parameters": { + "temperature": 0.3 + } + }, + { + "id": "mistralai/devstral-small", + "canonical_slug": "mistralai/devstral-small-2507", + "hugging_face_id": "mistralai/Devstral-Small-2507", + "name": "Mistral: Devstral Small 1.1", + "created": 1752160751, + "description": "Devstral Small 1.1 is a 24B parameter open-weight language model for software engineering agents, developed by Mistral AI in collaboration with All Hands AI. Finetuned from Mistral Small 3.1 and released under the Apache 2.0 license, it features a 128k token context window and supports both Mistral-style function calling and XML output formats.\n\nDesigned for agentic coding workflows, Devstral Small 1.1 is optimized for tasks such as codebase exploration, multi-file edits, and integration into autonomous development agents like OpenHands and Cline. It achieves 53.6% on SWE-Bench Verified, surpassing all other open models on this benchmark, while remaining lightweight enough to run on a single 4090 GPU or Apple silicon machine. The model uses a Tekken tokenizer with a 131k vocabulary and is deployable via vLLM, Transformers, Ollama, LM Studio, and other OpenAI-compatible runtimes.\n", + "context_length": 128000, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Mistral", + "instruct_type": null + }, + "pricing": { + "prompt": "0.00000007", + "completion": "0.00000028", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 128000, + "max_completion_tokens": null, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "max_tokens", + "min_p", + "presence_penalty", + "repetition_penalty", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_k", + "top_p" + ], + "default_parameters": { + "temperature": 0.3 + } + }, + { + "id": "cognitivecomputations/dolphin-mistral-24b-venice-edition:free", + "canonical_slug": "venice/uncensored", + "hugging_face_id": "cognitivecomputations/Dolphin-Mistral-24B-Venice-Edition", + "name": "Venice: Uncensored (free)", + "created": 1752094966, + "description": "Venice Uncensored Dolphin Mistral 24B Venice Edition is a fine-tuned variant of Mistral-Small-24B-Instruct-2501, developed by dphn.ai in collaboration with Venice.ai. 
This model is designed as an “uncensored” instruct-tuned LLM, preserving user control over alignment, system prompts, and behavior. Intended for advanced and unrestricted use cases, Venice Uncensored emphasizes steerability and transparent behavior, removing default safety and alignment layers typically found in mainstream assistant models.", + "context_length": 32768, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Other", + "instruct_type": null + }, + "pricing": { + "prompt": "0", + "completion": "0", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 32768, + "max_completion_tokens": null, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "max_tokens", + "presence_penalty", + "response_format", + "stop", + "structured_outputs", + "temperature", + "top_k", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "x-ai/grok-4", + "canonical_slug": "x-ai/grok-4-07-09", + "hugging_face_id": "", + "name": "xAI: Grok 4", + "created": 1752087689, + "description": "Grok 4 is xAI's latest reasoning model with a 256k context window. It supports parallel tool calling, structured outputs, and both image and text inputs. Note that reasoning is not exposed, reasoning cannot be disabled, and the reasoning effort cannot be specified. Pricing increases once the total token count in a given request exceeds 128k. See more details on the [xAI docs](https://docs.x.ai/docs/models/grok-4-0709).", + "context_length": 256000, + "architecture": { + "modality": "text+image-\u003Etext", + "input_modalities": ["image", "text"], + "output_modalities": ["text"], + "tokenizer": "Grok", + "instruct_type": null + }, + "pricing": { + "prompt": "0.000003", + "completion": "0.000015", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0", + "input_cache_read": "0.00000075" + }, + "top_provider": { + "context_length": 256000, + "max_completion_tokens": null, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "include_reasoning", + "logprobs", + "max_tokens", + "reasoning", + "response_format", + "seed", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_logprobs", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "google/gemma-3n-e2b-it:free", + "canonical_slug": "google/gemma-3n-e2b-it", + "hugging_face_id": "google/gemma-3n-E2B-it", + "name": "Google: Gemma 3n 2B (free)", + "created": 1752074904, + "description": "Gemma 3n E2B IT is a multimodal, instruction-tuned model developed by Google DeepMind, designed to operate efficiently at an effective parameter size of 2B while leveraging a 6B architecture. Based on the MatFormer architecture, it supports nested submodels and modular composition via the Mix-and-Match framework. Gemma 3n models are optimized for low-resource deployment, offering 32K context length and strong multilingual and reasoning performance across common benchmarks.
This variant is trained on a diverse corpus including code, math, web, and multimodal data.", + "context_length": 8192, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Other", + "instruct_type": null + }, + "pricing": { + "prompt": "0", + "completion": "0", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 8192, + "max_completion_tokens": 2048, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "max_tokens", + "presence_penalty", + "response_format", + "seed", + "stop", + "temperature", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "tencent/hunyuan-a13b-instruct", + "canonical_slug": "tencent/hunyuan-a13b-instruct", + "hugging_face_id": "tencent/Hunyuan-A13B-Instruct", + "name": "Tencent: Hunyuan A13B Instruct", + "created": 1751987664, + "description": "Hunyuan-A13B is a 13B active parameter Mixture-of-Experts (MoE) language model developed by Tencent, with a total parameter count of 80B and support for reasoning via Chain-of-Thought. It offers competitive benchmark performance across mathematics, science, coding, and multi-turn reasoning tasks, while maintaining high inference efficiency via Grouped Query Attention (GQA) and quantization support (FP8, GPTQ, etc.).", + "context_length": 131072, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Other", + "instruct_type": null + }, + "pricing": { + "prompt": "0.00000014", + "completion": "0.00000057", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 131072, + "max_completion_tokens": 131072, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "include_reasoning", + "reasoning", + "response_format", + "structured_outputs", + "temperature", + "top_k", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "tngtech/deepseek-r1t2-chimera:free", + "canonical_slug": "tngtech/deepseek-r1t2-chimera", + "hugging_face_id": "tngtech/DeepSeek-TNG-R1T2-Chimera", + "name": "TNG: DeepSeek R1T2 Chimera (free)", + "created": 1751986985, + "description": "DeepSeek-TNG-R1T2-Chimera is the second-generation Chimera model from TNG Tech. It is a 671 B-parameter mixture-of-experts text-generation model assembled from DeepSeek-AI’s R1-0528, R1, and V3-0324 checkpoints with an Assembly-of-Experts merge. The tri-parent design yields strong reasoning performance while running roughly 20 % faster than the original R1 and more than 2× faster than R1-0528 under vLLM, giving a favorable cost-to-intelligence trade-off. 
The checkpoint supports contexts up to 60 k tokens in standard use (tested to ~130 k) and maintains consistent \u003Cthink\u003E token behaviour, making it suitable for long-context analysis, dialogue and other open-ended generation tasks.", + "context_length": 163840, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "DeepSeek", + "instruct_type": null + }, + "pricing": { + "prompt": "0", + "completion": "0", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 163840, + "max_completion_tokens": null, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "include_reasoning", + "max_tokens", + "presence_penalty", + "reasoning", + "repetition_penalty", + "seed", + "stop", + "temperature", + "top_k", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "tngtech/deepseek-r1t2-chimera", + "canonical_slug": "tngtech/deepseek-r1t2-chimera", + "hugging_face_id": "tngtech/DeepSeek-TNG-R1T2-Chimera", + "name": "TNG: DeepSeek R1T2 Chimera", + "created": 1751986985, + "description": "DeepSeek-TNG-R1T2-Chimera is the second-generation Chimera model from TNG Tech. It is a 671 B-parameter mixture-of-experts text-generation model assembled from DeepSeek-AI’s R1-0528, R1, and V3-0324 checkpoints with an Assembly-of-Experts merge. The tri-parent design yields strong reasoning performance while running roughly 20 % faster than the original R1 and more than 2× faster than R1-0528 under vLLM, giving a favorable cost-to-intelligence trade-off. The checkpoint supports contexts up to 60 k tokens in standard use (tested to ~130 k) and maintains consistent \u003Cthink\u003E token behaviour, making it suitable for long-context analysis, dialogue and other open-ended generation tasks.", + "context_length": 163840, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "DeepSeek", + "instruct_type": null + }, + "pricing": { + "prompt": "0.0000003", + "completion": "0.0000012", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 163840, + "max_completion_tokens": 163840, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "include_reasoning", + "max_tokens", + "presence_penalty", + "reasoning", + "repetition_penalty", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_k", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "morph/morph-v3-large", + "canonical_slug": "morph/morph-v3-large", + "hugging_face_id": "", + "name": "Morph: Morph V3 Large", + "created": 1751910858, + "description": "Morph's high-accuracy apply model for complex code edits. ~4,500 tokens/sec with 98% accuracy for precise code transformations.\n\nThe model requires the prompt to be in the following format: \n\u003Cinstruction\u003E{instruction}\u003C/instruction\u003E\n\u003Ccode\u003E{initial_code}\u003C/code\u003E\n\u003Cupdate\u003E{edit_snippet}\u003C/update\u003E\n\nZero Data Retention is enabled for Morph. 
Learn more about this model in their [documentation](https://docs.morphllm.com/quickstart)", + "context_length": 262144, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Other", + "instruct_type": null + }, + "pricing": { + "prompt": "0.0000009", + "completion": "0.0000019", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 262144, + "max_completion_tokens": 131072, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": ["max_tokens", "stop", "temperature"], + "default_parameters": { + "temperature": null, + "top_p": null, + "frequency_penalty": null + } + }, + { + "id": "morph/morph-v3-fast", + "canonical_slug": "morph/morph-v3-fast", + "hugging_face_id": "", + "name": "Morph: Morph V3 Fast", + "created": 1751910002, + "description": "Morph's fastest apply model for code edits. ~10,500 tokens/sec with 96% accuracy for rapid code transformations.\n\nThe model requires the prompt to be in the following format: \n\u003Cinstruction\u003E{instruction}\u003C/instruction\u003E\n\u003Ccode\u003E{initial_code}\u003C/code\u003E\n\u003Cupdate\u003E{edit_snippet}\u003C/update\u003E\n\nZero Data Retention is enabled for Morph. Learn more about this model in their [documentation](https://docs.morphllm.com/quickstart)", + "context_length": 81920, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Other", + "instruct_type": null + }, + "pricing": { + "prompt": "0.0000008", + "completion": "0.0000012", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 81920, + "max_completion_tokens": 38000, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": ["max_tokens", "stop", "temperature"], + "default_parameters": { + "temperature": null, + "top_p": null, + "frequency_penalty": null + } + }, + { + "id": "baidu/ernie-4.5-vl-424b-a47b", + "canonical_slug": "baidu/ernie-4.5-vl-424b-a47b", + "hugging_face_id": "baidu/ERNIE-4.5-VL-424B-A47B-PT", + "name": "Baidu: ERNIE 4.5 VL 424B A47B ", + "created": 1751300903, + "description": "ERNIE-4.5-VL-424B-A47B is a multimodal Mixture-of-Experts (MoE) model from Baidu’s ERNIE 4.5 series, featuring 424B total parameters with 47B active per token. It is trained jointly on text and image data using a heterogeneous MoE architecture and modality-isolated routing to enable high-fidelity cross-modal reasoning, image understanding, and long-context generation (up to 131k tokens). Fine-tuned with techniques like SFT, DPO, UPO, and RLVR, this model supports both “thinking” and non-thinking inference modes. 
Designed for vision-language tasks in English and Chinese, it is optimized for efficient scaling and can operate under 4-bit/8-bit quantization.", + "context_length": 123000, + "architecture": { + "modality": "text+image-\u003Etext", + "input_modalities": ["image", "text"], + "output_modalities": ["text"], + "tokenizer": "Other", + "instruct_type": null + }, + "pricing": { + "prompt": "0.00000042", + "completion": "0.00000125", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 123000, + "max_completion_tokens": 16000, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "include_reasoning", + "max_tokens", + "presence_penalty", + "reasoning", + "repetition_penalty", + "seed", + "stop", + "temperature", + "top_k", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "baidu/ernie-4.5-300b-a47b", + "canonical_slug": "baidu/ernie-4.5-300b-a47b", + "hugging_face_id": "baidu/ERNIE-4.5-300B-A47B-PT", + "name": "Baidu: ERNIE 4.5 300B A47B ", + "created": 1751300139, + "description": "ERNIE-4.5-300B-A47B is a 300B parameter Mixture-of-Experts (MoE) language model developed by Baidu as part of the ERNIE 4.5 series. It activates 47B parameters per token and supports text generation in both English and Chinese. Optimized for high-throughput inference and efficient scaling, it uses a heterogeneous MoE structure with advanced routing and quantization strategies, including FP8 and 2-bit formats. This version is fine-tuned for language-only tasks and supports reasoning, tool parameters, and extended context lengths up to 131k tokens. Suitable for general-purpose LLM applications with high reasoning and throughput demands.", + "context_length": 123000, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Other", + "instruct_type": null + }, + "pricing": { + "prompt": "0.00000028", + "completion": "0.0000011", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 123000, + "max_completion_tokens": 12000, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "max_tokens", + "presence_penalty", + "repetition_penalty", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "top_k", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "thedrummer/anubis-70b-v1.1", + "canonical_slug": "thedrummer/anubis-70b-v1.1", + "hugging_face_id": "TheDrummer/Anubis-70B-v1.1", + "name": "TheDrummer: Anubis 70B V1.1", + "created": 1751208347, + "description": "TheDrummer's Anubis v1.1 is an unaligned, creative Llama 3.3 70B model focused on providing character-driven roleplay & stories. 
It excels at gritty, visceral prose, unique character adherence, and coherent narratives, while maintaining the instruction following Llama 3.3 70B is known for.", + "context_length": 131072, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Llama3", + "instruct_type": null + }, + "pricing": { + "prompt": "0.00000065", + "completion": "0.000001", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 131072, + "max_completion_tokens": 131072, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "logit_bias", + "max_tokens", + "min_p", + "presence_penalty", + "repetition_penalty", + "seed", + "stop", + "temperature", + "top_k", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "inception/mercury", + "canonical_slug": "inception/mercury", + "hugging_face_id": "", + "name": "Inception: Mercury", + "created": 1750973026, + "description": "Mercury is the first diffusion large language model (dLLM). Applying a breakthrough discrete diffusion approach, the model runs 5-10x faster than even speed-optimized models like GPT-4.1 Nano and Claude 3.5 Haiku while matching their performance. Mercury's speed enables developers to provide responsive user experiences, including with voice agents, search interfaces, and chatbots. Read more in the [blog post](https://www.inceptionlabs.ai/blog/introducing-mercury).", + "context_length": 128000, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Other", + "instruct_type": null + }, + "pricing": { + "prompt": "0.00000025", + "completion": "0.000001", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 128000, + "max_completion_tokens": 16384, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "max_tokens", + "presence_penalty", + "response_format", + "stop", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_k", + "top_p" + ], + "default_parameters": { + "temperature": 0, + "top_p": null, + "frequency_penalty": null + } + }, + { + "id": "mistralai/mistral-small-3.2-24b-instruct:free", + "canonical_slug": "mistralai/mistral-small-3.2-24b-instruct-2506", + "hugging_face_id": "mistralai/Mistral-Small-3.2-24B-Instruct-2506", + "name": "Mistral: Mistral Small 3.2 24B (free)", + "created": 1750443016, + "description": "Mistral-Small-3.2-24B-Instruct-2506 is an updated 24B parameter model from Mistral optimized for instruction following, repetition reduction, and improved function calling.
Compared to the 3.1 release, version 3.2 significantly improves accuracy on WildBench and Arena Hard, reduces infinite generations, and delivers gains in tool use and structured output tasks.\n\nIt supports image and text inputs with structured outputs, function/tool calling, and strong performance across coding (HumanEval+, MBPP), STEM (MMLU, MATH, GPQA), and vision benchmarks (ChartQA, DocVQA).", + "context_length": 131072, + "architecture": { + "modality": "text+image-\u003Etext", + "input_modalities": ["image", "text"], + "output_modalities": ["text"], + "tokenizer": "Mistral", + "instruct_type": null + }, + "pricing": { + "prompt": "0", + "completion": "0", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 131072, + "max_completion_tokens": null, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "max_tokens", + "presence_penalty", + "repetition_penalty", + "seed", + "stop", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_k", + "top_p" + ], + "default_parameters": { + "temperature": 0.3 + } + }, + { + "id": "mistralai/mistral-small-3.2-24b-instruct", + "canonical_slug": "mistralai/mistral-small-3.2-24b-instruct-2506", + "hugging_face_id": "mistralai/Mistral-Small-3.2-24B-Instruct-2506", + "name": "Mistral: Mistral Small 3.2 24B", + "created": 1750443016, + "description": "Mistral-Small-3.2-24B-Instruct-2506 is an updated 24B parameter model from Mistral optimized for instruction following, repetition reduction, and improved function calling. Compared to the 3.1 release, version 3.2 significantly improves accuracy on WildBench and Arena Hard, reduces infinite generations, and delivers gains in tool use and structured output tasks.\n\nIt supports image and text inputs with structured outputs, function/tool calling, and strong performance across coding (HumanEval+, MBPP), STEM (MMLU, MATH, GPQA), and vision benchmarks (ChartQA, DocVQA).", + "context_length": 131072, + "architecture": { + "modality": "text+image-\u003Etext", + "input_modalities": ["image", "text"], + "output_modalities": ["text"], + "tokenizer": "Mistral", + "instruct_type": null + }, + "pricing": { + "prompt": "0.00000006", + "completion": "0.00000018", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 131072, + "max_completion_tokens": 131072, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "logit_bias", + "max_tokens", + "min_p", + "presence_penalty", + "repetition_penalty", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_k", + "top_p" + ], + "default_parameters": { + "temperature": 0.3 + } + }, + { + "id": "minimax/minimax-m1", + "canonical_slug": "minimax/minimax-m1", + "hugging_face_id": "", + "name": "MiniMax: MiniMax M1", + "created": 1750200414, + "description": "MiniMax-M1 is a large-scale, open-weight reasoning model designed for extended context and high-efficiency inference. It leverages a hybrid Mixture-of-Experts (MoE) architecture paired with a custom \"lightning attention\" mechanism, allowing it to process long sequences—up to 1 million tokens—while maintaining competitive FLOP efficiency. 
With 456 billion total parameters and 45.9B active per token, this variant is optimized for complex, multi-step reasoning tasks.\n\nTrained via a custom reinforcement learning pipeline (CISPO), M1 excels in long-context understanding, software engineering, agentic tool use, and mathematical reasoning. Benchmarks show strong performance across FullStackBench, SWE-bench, MATH, GPQA, and TAU-Bench, often outperforming other open models like DeepSeek R1 and Qwen3-235B.", + "context_length": 1000000, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Other", + "instruct_type": null + }, + "pricing": { + "prompt": "0.0000004", + "completion": "0.0000022", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 1000000, + "max_completion_tokens": 40000, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "include_reasoning", + "max_tokens", + "presence_penalty", + "reasoning", + "repetition_penalty", + "seed", + "stop", + "temperature", + "tool_choice", + "tools", + "top_k", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "google/gemini-2.5-flash-lite-preview-06-17", + "canonical_slug": "google/gemini-2.5-flash-lite-preview-06-17", + "hugging_face_id": "", + "name": "Google: Gemini 2.5 Flash Lite Preview 06-17", + "created": 1750173831, + "description": "Gemini 2.5 Flash-Lite is a lightweight reasoning model in the Gemini 2.5 family, optimized for ultra-low latency and cost efficiency. It offers improved throughput, faster token generation, and better performance across common benchmarks compared to earlier Flash models. By default, \"thinking\" (i.e. multi-pass reasoning) is disabled to prioritize speed, but developers can enable it via the [Reasoning API parameter](https://openrouter.ai/docs/use-cases/reasoning-tokens) to selectively trade off cost for intelligence. ", + "context_length": 1048576, + "architecture": { + "modality": "text+image-\u003Etext", + "input_modalities": ["file", "image", "text", "audio"], + "output_modalities": ["text"], + "tokenizer": "Gemini", + "instruct_type": null + }, + "pricing": { + "prompt": "0.0000001", + "completion": "0.0000004", + "request": "0", + "image": "0", + "audio": "0.0000003", + "web_search": "0", + "internal_reasoning": "0", + "input_cache_read": "0.000000025", + "input_cache_write": "0.0000001833" + }, + "top_provider": { + "context_length": 1048576, + "max_completion_tokens": 65535, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "include_reasoning", + "max_tokens", + "reasoning", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "google/gemini-2.5-flash", + "canonical_slug": "google/gemini-2.5-flash", + "hugging_face_id": "", + "name": "Google: Gemini 2.5 Flash", + "created": 1750172488, + "description": "Gemini 2.5 Flash is Google's state-of-the-art workhorse model, specifically designed for advanced reasoning, coding, mathematics, and scientific tasks. It includes built-in \"thinking\" capabilities, enabling it to provide responses with greater accuracy and nuanced context handling. 
\n\nAdditionally, Gemini 2.5 Flash is configurable through the \"max tokens for reasoning\" parameter, as described in the documentation (https://openrouter.ai/docs/use-cases/reasoning-tokens#max-tokens-for-reasoning).", + "context_length": 1048576, + "architecture": { + "modality": "text+image-\u003Etext", + "input_modalities": ["file", "image", "text", "audio", "video"], + "output_modalities": ["text"], + "tokenizer": "Gemini", + "instruct_type": null + }, + "pricing": { + "prompt": "0.0000003", + "completion": "0.0000025", + "request": "0", + "image": "0.001238", + "web_search": "0", + "internal_reasoning": "0", + "input_cache_read": "0.00000003", + "input_cache_write": "0.0000003833" + }, + "top_provider": { + "context_length": 1048576, + "max_completion_tokens": 65535, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "include_reasoning", + "max_tokens", + "reasoning", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_p" + ], + "default_parameters": { + "temperature": null, + "top_p": null, + "frequency_penalty": null + } + }, + { + "id": "google/gemini-2.5-pro", + "canonical_slug": "google/gemini-2.5-pro", + "hugging_face_id": "", + "name": "Google: Gemini 2.5 Pro", + "created": 1750169544, + "description": "Gemini 2.5 Pro is Google’s state-of-the-art AI model designed for advanced reasoning, coding, mathematics, and scientific tasks. It employs “thinking” capabilities, enabling it to reason through responses with enhanced accuracy and nuanced context handling. Gemini 2.5 Pro achieves top-tier performance on multiple benchmarks, including first-place positioning on the LMArena leaderboard, reflecting superior human-preference alignment and complex problem-solving abilities.", + "context_length": 1048576, + "architecture": { + "modality": "text+image-\u003Etext", + "input_modalities": ["text", "image", "file", "audio", "video"], + "output_modalities": ["text"], + "tokenizer": "Gemini", + "instruct_type": null + }, + "pricing": { + "prompt": "0.00000125", + "completion": "0.00001", + "request": "0", + "image": "0.00516", + "web_search": "0", + "internal_reasoning": "0", + "input_cache_read": "0.000000125", + "input_cache_write": "0.000001625" + }, + "top_provider": { + "context_length": 1048576, + "max_completion_tokens": 65536, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "include_reasoning", + "max_tokens", + "reasoning", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_p" + ], + "default_parameters": { + "temperature": null, + "top_p": null, + "frequency_penalty": null + } + }, + { + "id": "moonshotai/kimi-dev-72b", + "canonical_slug": "moonshotai/kimi-dev-72b", + "hugging_face_id": "moonshotai/Kimi-Dev-72B", + "name": "MoonshotAI: Kimi Dev 72B", + "created": 1750115909, + "description": "Kimi-Dev-72B is an open-source large language model fine-tuned for software engineering and issue resolution tasks. Based on Qwen2.5-72B, it is optimized using large-scale reinforcement learning that applies code patches in real repositories and validates them via full test suite execution—rewarding only correct, robust completions. 
The model achieves 60.4% on SWE-bench Verified, setting a new benchmark among open-source models for software bug fixing and code reasoning.", + "context_length": 131072, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Other", + "instruct_type": null + }, + "pricing": { + "prompt": "0.00000029", + "completion": "0.00000115", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 131072, + "max_completion_tokens": 131072, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "include_reasoning", + "reasoning", + "response_format", + "structured_outputs", + "temperature", + "top_k", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "openai/o3-pro", + "canonical_slug": "openai/o3-pro-2025-06-10", + "hugging_face_id": "", + "name": "OpenAI: o3 Pro", + "created": 1749598352, + "description": "The o-series of models are trained with reinforcement learning to think before they answer and perform complex reasoning. The o3-pro model uses more compute to think harder and provide consistently better answers.\n\nNote that BYOK is required for this model. Set up here: https://openrouter.ai/settings/integrations", + "context_length": 200000, + "architecture": { + "modality": "text+image-\u003Etext", + "input_modalities": ["text", "file", "image"], + "output_modalities": ["text"], + "tokenizer": "GPT", + "instruct_type": null + }, + "pricing": { + "prompt": "0.00002", + "completion": "0.00008", + "request": "0", + "image": "0.0153", + "web_search": "0.01", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 200000, + "max_completion_tokens": 100000, + "is_moderated": true + }, + "per_request_limits": null, + "supported_parameters": [ + "include_reasoning", + "max_tokens", + "reasoning", + "response_format", + "seed", + "structured_outputs", + "tool_choice", + "tools" + ], + "default_parameters": {} + }, + { + "id": "x-ai/grok-3-mini", + "canonical_slug": "x-ai/grok-3-mini", + "hugging_face_id": "", + "name": "xAI: Grok 3 Mini", + "created": 1749583245, + "description": "A lightweight model that thinks before responding. Fast, smart, and great for logic-based tasks that do not require deep domain knowledge. The raw thinking traces are accessible.", + "context_length": 131072, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Grok", + "instruct_type": null + }, + "pricing": { + "prompt": "0.0000003", + "completion": "0.0000005", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0", + "input_cache_read": "0.000000075" + }, + "top_provider": { + "context_length": 131072, + "max_completion_tokens": null, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "include_reasoning", + "logprobs", + "max_tokens", + "reasoning", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_logprobs", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "x-ai/grok-3", + "canonical_slug": "x-ai/grok-3", + "hugging_face_id": "", + "name": "xAI: Grok 3", + "created": 1749582908, + "description": "Grok 3 is the latest model from xAI. It's their flagship model that excels at enterprise use cases like data extraction, coding, and text summarization. 
It possesses deep domain knowledge in finance, healthcare, law, and science.\n\n", + "context_length": 131072, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Grok", + "instruct_type": null + }, + "pricing": { + "prompt": "0.000003", + "completion": "0.000015", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0", + "input_cache_read": "0.00000075" + }, + "top_provider": { + "context_length": 131072, + "max_completion_tokens": null, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "logprobs", + "max_tokens", + "presence_penalty", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_logprobs", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "mistralai/magistral-small-2506", + "canonical_slug": "mistralai/magistral-small-2506", + "hugging_face_id": "mistralai/Magistral-Small-2506", + "name": "Mistral: Magistral Small 2506", + "created": 1749569561, + "description": "Magistral Small is a 24B parameter instruction-tuned model based on Mistral-Small-3.1 (2503), enhanced through supervised fine-tuning on traces from Magistral Medium and further refined via reinforcement learning. It is optimized for reasoning and supports a wide multilingual range, including over 20 languages.", + "context_length": 40000, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Mistral", + "instruct_type": null + }, + "pricing": { + "prompt": "0.0000005", + "completion": "0.0000015", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 40000, + "max_completion_tokens": 40000, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "include_reasoning", + "max_tokens", + "presence_penalty", + "reasoning", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_p" + ], + "default_parameters": { + "temperature": 0.3 + } + }, + { + "id": "mistralai/magistral-medium-2506:thinking", + "canonical_slug": "mistralai/magistral-medium-2506", + "hugging_face_id": "", + "name": "Mistral: Magistral Medium 2506 (thinking)", + "created": 1749354054, + "description": "Magistral is Mistral's first reasoning model. It is ideal for general purpose use requiring longer thought processing and better accuracy than with non-reasoning LLMs.
From legal research and financial forecasting to software development and creative storytelling — this model solves multi-step challenges where transparency and precision are critical.", + "context_length": 40960, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Mistral", + "instruct_type": null + }, + "pricing": { + "prompt": "0.000002", + "completion": "0.000005", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 40960, + "max_completion_tokens": 40000, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "include_reasoning", + "max_tokens", + "presence_penalty", + "reasoning", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_p" + ], + "default_parameters": { + "temperature": 0.3 + } + }, + { + "id": "mistralai/magistral-medium-2506", + "canonical_slug": "mistralai/magistral-medium-2506", + "hugging_face_id": "", + "name": "Mistral: Magistral Medium 2506", + "created": 1749354054, + "description": "Magistral is Mistral's first reasoning model. It is ideal for general purpose use requiring longer thought processing and better accuracy than with non-reasoning LLMs. From legal research and financial forecasting to software development and creative storytelling — this model solves multi-step challenges where transparency and precision are critical.", + "context_length": 40960, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Mistral", + "instruct_type": null + }, + "pricing": { + "prompt": "0.000002", + "completion": "0.000005", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 40960, + "max_completion_tokens": 40000, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "include_reasoning", + "max_tokens", + "presence_penalty", + "reasoning", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_p" + ], + "default_parameters": { + "temperature": 0.3 + } + }, + { + "id": "google/gemini-2.5-pro-preview", + "canonical_slug": "google/gemini-2.5-pro-preview-06-05", + "hugging_face_id": "", + "name": "Google: Gemini 2.5 Pro Preview 06-05", + "created": 1749137257, + "description": "Gemini 2.5 Pro is Google’s state-of-the-art AI model designed for advanced reasoning, coding, mathematics, and scientific tasks. It employs “thinking” capabilities, enabling it to reason through responses with enhanced accuracy and nuanced context handling. 
Gemini 2.5 Pro achieves top-tier performance on multiple benchmarks, including first-place positioning on the LMArena leaderboard, reflecting superior human-preference alignment and complex problem-solving abilities.\n", + "context_length": 1048576, + "architecture": { + "modality": "text+image-\u003Etext", + "input_modalities": ["file", "image", "text", "audio"], + "output_modalities": ["text"], + "tokenizer": "Gemini", + "instruct_type": null + }, + "pricing": { + "prompt": "0.00000125", + "completion": "0.00001", + "request": "0", + "image": "0.00516", + "web_search": "0", + "internal_reasoning": "0", + "input_cache_read": "0.00000031", + "input_cache_write": "0.000001625" + }, + "top_provider": { + "context_length": 1048576, + "max_completion_tokens": 65536, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "include_reasoning", + "max_tokens", + "reasoning", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "deepseek/deepseek-r1-0528-qwen3-8b:free", + "canonical_slug": "deepseek/deepseek-r1-0528-qwen3-8b", + "hugging_face_id": "deepseek-ai/deepseek-r1-0528-qwen3-8b", + "name": "DeepSeek: DeepSeek R1 0528 Qwen3 8B (free)", + "created": 1748538543, + "description": "DeepSeek-R1-0528 is a lightly upgraded release of DeepSeek R1 that taps more compute and smarter post-training tricks, pushing its reasoning and inference to the brink of flagship models like O3 and Gemini 2.5 Pro.\nIt now tops math, programming, and logic leaderboards, showcasing a step-change in depth-of-thought.\nThe distilled variant, DeepSeek-R1-0528-Qwen3-8B, transfers this chain-of-thought into an 8 B-parameter form, beating standard Qwen3 8B by +10 pp and tying the 235 B “thinking” giant on AIME 2024.", + "context_length": 131072, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Qwen", + "instruct_type": "deepseek-r1" + }, + "pricing": { + "prompt": "0", + "completion": "0", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 131072, + "max_completion_tokens": null, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "include_reasoning", + "max_tokens", + "presence_penalty", + "reasoning", + "repetition_penalty", + "seed", + "stop", + "temperature", + "top_k", + "top_p" + ], + "default_parameters": { + "temperature": null, + "top_p": null, + "frequency_penalty": null + } + }, + { + "id": "deepseek/deepseek-r1-0528-qwen3-8b", + "canonical_slug": "deepseek/deepseek-r1-0528-qwen3-8b", + "hugging_face_id": "deepseek-ai/deepseek-r1-0528-qwen3-8b", + "name": "DeepSeek: DeepSeek R1 0528 Qwen3 8B", + "created": 1748538543, + "description": "DeepSeek-R1-0528 is a lightly upgraded release of DeepSeek R1 that taps more compute and smarter post-training tricks, pushing its reasoning and inference to the brink of flagship models like O3 and Gemini 2.5 Pro.\nIt now tops math, programming, and logic leaderboards, showcasing a step-change in depth-of-thought.\nThe distilled variant, DeepSeek-R1-0528-Qwen3-8B, transfers this chain-of-thought into an 8 B-parameter form, beating standard Qwen3 8B by +10 pp and tying the 235 B “thinking” giant on AIME 2024.", + "context_length": 32768, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": 
["text"], + "output_modalities": ["text"], + "tokenizer": "Qwen", + "instruct_type": "deepseek-r1" + }, + "pricing": { + "prompt": "0.00000002", + "completion": "0.0000001", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 32768, + "max_completion_tokens": 32768, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "include_reasoning", + "max_tokens", + "presence_penalty", + "reasoning", + "repetition_penalty", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "top_k", + "top_p" + ], + "default_parameters": { + "temperature": null, + "top_p": null, + "frequency_penalty": null + } + }, + { + "id": "deepseek/deepseek-r1-0528:free", + "canonical_slug": "deepseek/deepseek-r1-0528", + "hugging_face_id": "deepseek-ai/DeepSeek-R1-0528", + "name": "DeepSeek: R1 0528 (free)", + "created": 1748455170, + "description": "May 28th update to the [original DeepSeek R1](/deepseek/deepseek-r1) Performance on par with [OpenAI o1](/openai/o1), but open-sourced and with fully open reasoning tokens. It's 671B parameters in size, with 37B active in an inference pass.\n\nFully open-source model.", + "context_length": 163840, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "DeepSeek", + "instruct_type": "deepseek-r1" + }, + "pricing": { + "prompt": "0", + "completion": "0", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 163840, + "max_completion_tokens": null, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "include_reasoning", + "max_tokens", + "presence_penalty", + "reasoning", + "repetition_penalty", + "seed", + "stop", + "temperature", + "top_k", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "deepseek/deepseek-r1-0528", + "canonical_slug": "deepseek/deepseek-r1-0528", + "hugging_face_id": "deepseek-ai/DeepSeek-R1-0528", + "name": "DeepSeek: R1 0528", + "created": 1748455170, + "description": "May 28th update to the [original DeepSeek R1](/deepseek/deepseek-r1) Performance on par with [OpenAI o1](/openai/o1), but open-sourced and with fully open reasoning tokens. 
It's 671B parameters in size, with 37B active in an inference pass.\n\nFully open-source model.", + "context_length": 163840, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "DeepSeek", + "instruct_type": "deepseek-r1" + }, + "pricing": { + "prompt": "0.0000004", + "completion": "0.00000175", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 163840, + "max_completion_tokens": 163840, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "include_reasoning", + "logit_bias", + "logprobs", + "max_tokens", + "min_p", + "presence_penalty", + "reasoning", + "repetition_penalty", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_k", + "top_logprobs", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "anthropic/claude-opus-4", + "canonical_slug": "anthropic/claude-4-opus-20250522", + "hugging_face_id": "", + "name": "Anthropic: Claude Opus 4", + "created": 1747931245, + "description": "Claude Opus 4 is benchmarked as the world’s best coding model, at time of release, bringing sustained performance on complex, long-running tasks and agent workflows. It sets new benchmarks in software engineering, achieving leading results on SWE-bench (72.5%) and Terminal-bench (43.2%). Opus 4 supports extended, agentic workflows, handling thousands of task steps continuously for hours without degradation. \n\nRead more at the [blog post here](https://www.anthropic.com/news/claude-4)", + "context_length": 200000, + "architecture": { + "modality": "text+image-\u003Etext", + "input_modalities": ["image", "text", "file"], + "output_modalities": ["text"], + "tokenizer": "Claude", + "instruct_type": null + }, + "pricing": { + "prompt": "0.000015", + "completion": "0.000075", + "request": "0", + "image": "0.024", + "web_search": "0", + "internal_reasoning": "0", + "input_cache_read": "0.0000015", + "input_cache_write": "0.00001875" + }, + "top_provider": { + "context_length": 200000, + "max_completion_tokens": 32000, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "include_reasoning", + "max_tokens", + "reasoning", + "stop", + "temperature", + "tool_choice", + "tools", + "top_k", + "top_p" + ], + "default_parameters": { + "temperature": null, + "top_p": null, + "frequency_penalty": null + } + }, + { + "id": "anthropic/claude-sonnet-4", + "canonical_slug": "anthropic/claude-4-sonnet-20250522", + "hugging_face_id": "", + "name": "Anthropic: Claude Sonnet 4", + "created": 1747930371, + "description": "Claude Sonnet 4 significantly enhances the capabilities of its predecessor, Sonnet 3.7, excelling in both coding and reasoning tasks with improved precision and controllability. Achieving state-of-the-art performance on SWE-bench (72.7%), Sonnet 4 balances capability and computational efficiency, making it suitable for a broad range of applications from routine coding tasks to complex software development projects. Key enhancements include improved autonomous codebase navigation, reduced error rates in agent-driven workflows, and increased reliability in following intricate instructions. 
Sonnet 4 is optimized for practical everyday use, providing advanced reasoning capabilities while maintaining efficiency and responsiveness in diverse internal and external scenarios.\n\nRead more at the [blog post here](https://www.anthropic.com/news/claude-4)", + "context_length": 1000000, + "architecture": { + "modality": "text+image-\u003Etext", + "input_modalities": ["image", "text", "file"], + "output_modalities": ["text"], + "tokenizer": "Claude", + "instruct_type": null + }, + "pricing": { + "prompt": "0.000003", + "completion": "0.000015", + "request": "0", + "image": "0.0048", + "web_search": "0", + "internal_reasoning": "0", + "input_cache_read": "0.0000003", + "input_cache_write": "0.00000375" + }, + "top_provider": { + "context_length": 1000000, + "max_completion_tokens": 64000, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "include_reasoning", + "max_tokens", + "reasoning", + "stop", + "temperature", + "tool_choice", + "tools", + "top_k", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "mistralai/devstral-small-2505", + "canonical_slug": "mistralai/devstral-small-2505", + "hugging_face_id": "mistralai/Devstral-Small-2505", + "name": "Mistral: Devstral Small 2505", + "created": 1747837379, + "description": "Devstral-Small-2505 is a 24B parameter agentic LLM fine-tuned from Mistral-Small-3.1, jointly developed by Mistral AI and All Hands AI for advanced software engineering tasks. It is optimized for codebase exploration, multi-file editing, and integration into coding agents, achieving state-of-the-art results on SWE-Bench Verified (46.8%).\n\nDevstral supports a 128k context window and uses a custom Tekken tokenizer. It is text-only, with the vision encoder removed, and is suitable for local deployment on high-end consumer hardware (e.g., RTX 4090, 32GB RAM Macs). Devstral is best used in agentic workflows via the OpenHands scaffold and is compatible with inference frameworks like vLLM, Transformers, and Ollama. It is released under the Apache 2.0 license.", + "context_length": 128000, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Mistral", + "instruct_type": null + }, + "pricing": { + "prompt": "0.00000006", + "completion": "0.00000012", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 128000, + "max_completion_tokens": null, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "max_tokens", + "min_p", + "presence_penalty", + "repetition_penalty", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_k", + "top_p" + ], + "default_parameters": { + "temperature": 0.3 + } + }, + { + "id": "google/gemma-3n-e4b-it:free", + "canonical_slug": "google/gemma-3n-e4b-it", + "hugging_face_id": "google/gemma-3n-E4B-it", + "name": "Google: Gemma 3n 4B (free)", + "created": 1747776824, + "description": "Gemma 3n E4B-it is optimized for efficient execution on mobile and low-resource devices, such as phones, laptops, and tablets. It supports multimodal inputs—including text, visual data, and audio—enabling diverse tasks such as text generation, speech recognition, translation, and image analysis. 
Leveraging innovations like Per-Layer Embedding (PLE) caching and the MatFormer architecture, Gemma 3n dynamically manages memory usage and computational load by selectively activating model parameters, significantly reducing runtime resource requirements.\n\nThis model supports a wide linguistic range (trained in over 140 languages) and features a flexible 32K token context window. Gemma 3n can selectively load parameters, optimizing memory and computational efficiency based on the task or device capabilities, making it well-suited for privacy-focused, offline-capable applications and on-device AI solutions. [Read more in the blog post](https://developers.googleblog.com/en/introducing-gemma-3n/)", + "context_length": 8192, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Other", + "instruct_type": null + }, + "pricing": { + "prompt": "0", + "completion": "0", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 8192, + "max_completion_tokens": 2048, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "max_tokens", + "presence_penalty", + "response_format", + "seed", + "stop", + "temperature", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "google/gemma-3n-e4b-it", + "canonical_slug": "google/gemma-3n-e4b-it", + "hugging_face_id": "google/gemma-3n-E4B-it", + "name": "Google: Gemma 3n 4B", + "created": 1747776824, + "description": "Gemma 3n E4B-it is optimized for efficient execution on mobile and low-resource devices, such as phones, laptops, and tablets. It supports multimodal inputs—including text, visual data, and audio—enabling diverse tasks such as text generation, speech recognition, translation, and image analysis. Leveraging innovations like Per-Layer Embedding (PLE) caching and the MatFormer architecture, Gemma 3n dynamically manages memory usage and computational load by selectively activating model parameters, significantly reducing runtime resource requirements.\n\nThis model supports a wide linguistic range (trained in over 140 languages) and features a flexible 32K token context window. Gemma 3n can selectively load parameters, optimizing memory and computational efficiency based on the task or device capabilities, making it well-suited for privacy-focused, offline-capable applications and on-device AI solutions. 
[Read more in the blog post](https://developers.googleblog.com/en/introducing-gemma-3n/)", + "context_length": 32768, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Other", + "instruct_type": null + }, + "pricing": { + "prompt": "0.00000002", + "completion": "0.00000004", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 32768, + "max_completion_tokens": null, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "logit_bias", + "max_tokens", + "min_p", + "presence_penalty", + "repetition_penalty", + "stop", + "temperature", + "top_k", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "openai/codex-mini", + "canonical_slug": "openai/codex-mini", + "hugging_face_id": "", + "name": "OpenAI: Codex Mini", + "created": 1747409761, + "description": "codex-mini-latest is a fine-tuned version of o4-mini specifically for use in Codex CLI. For direct use in the API, we recommend starting with gpt-4.1.", + "context_length": 200000, + "architecture": { + "modality": "text+image-\u003Etext", + "input_modalities": ["image", "text"], + "output_modalities": ["text"], + "tokenizer": "GPT", + "instruct_type": null + }, + "pricing": { + "prompt": "0.0000015", + "completion": "0.000006", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0", + "input_cache_read": "0.000000375" + }, + "top_provider": { + "context_length": 200000, + "max_completion_tokens": 100000, + "is_moderated": true + }, + "per_request_limits": null, + "supported_parameters": [ + "include_reasoning", + "max_tokens", + "reasoning", + "response_format", + "seed", + "structured_outputs", + "tool_choice", + "tools" + ], + "default_parameters": {} + }, + { + "id": "meta-llama/llama-3.3-8b-instruct:free", + "canonical_slug": "meta-llama/llama-3.3-8b-instruct", + "hugging_face_id": "", + "name": "Meta: Llama 3.3 8B Instruct (free)", + "created": 1747230154, + "description": "A lightweight and ultra-fast variant of Llama 3.3 70B, for use when quick response times are needed most.", + "context_length": 128000, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Llama3", + "instruct_type": null + }, + "pricing": { + "prompt": "0", + "completion": "0", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 128000, + "max_completion_tokens": 4028, + "is_moderated": true + }, + "per_request_limits": null, + "supported_parameters": [ + "max_tokens", + "repetition_penalty", + "response_format", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_k", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "nousresearch/deephermes-3-mistral-24b-preview", + "canonical_slug": "nousresearch/deephermes-3-mistral-24b-preview", + "hugging_face_id": "NousResearch/DeepHermes-3-Mistral-24B-Preview", + "name": "Nous: DeepHermes 3 Mistral 24B Preview", + "created": 1746830904, + "description": "DeepHermes 3 (Mistral 24B Preview) is an instruction-tuned language model by Nous Research based on Mistral-Small-24B, designed for chat, function calling, and advanced multi-turn reasoning. It introduces a dual-mode system that toggles between intuitive chat responses and structured “deep reasoning” mode using special system prompts. 
Fine-tuned via distillation from R1, it supports structured output (JSON mode) and function call syntax for agent-based applications.\n\nDeepHermes 3 supports a **reasoning toggle via system prompt**, allowing users to switch between fast, intuitive responses and deliberate, multi-step reasoning. When activated with the following specific system instruction, the model enters a *\"deep thinking\"* mode—generating extended chains of thought wrapped in `\u003Cthink\u003E\u003C/think\u003E` tags before delivering a final answer. \n\nSystem Prompt: You are a deep thinking AI, you may use extremely long chains of thought to deeply consider the problem and deliberate with yourself via systematic reasoning processes to help come to a correct solution prior to answering. You should enclose your thoughts and internal monologue inside \u003Cthink\u003E \u003C/think\u003E tags, and then provide your solution or response to the problem.\n", + "context_length": 32768, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Other", + "instruct_type": null + }, + "pricing": { + "prompt": "0.00000015", + "completion": "0.00000059", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 32768, + "max_completion_tokens": 32768, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "include_reasoning", + "max_tokens", + "presence_penalty", + "reasoning", + "repetition_penalty", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_k", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "mistralai/mistral-medium-3", + "canonical_slug": "mistralai/mistral-medium-3", + "hugging_face_id": "", + "name": "Mistral: Mistral Medium 3", + "created": 1746627341, + "description": "Mistral Medium 3 is a high-performance enterprise-grade language model designed to deliver frontier-level capabilities at significantly reduced operational cost. It balances state-of-the-art reasoning and multimodal performance with 8× lower cost compared to traditional large models, making it suitable for scalable deployments across professional and industrial use cases.\n\nThe model excels in domains such as coding, STEM reasoning, and enterprise adaptation. It supports hybrid, on-prem, and in-VPC deployments and is optimized for integration into custom workflows. 
Mistral Medium 3 offers competitive accuracy relative to larger models like Claude Sonnet 3.5/3.7, Llama 4 Maverick, and Command R+, while maintaining broad compatibility across cloud environments.", + "context_length": 131072, + "architecture": { + "modality": "text+image-\u003Etext", + "input_modalities": ["text", "image"], + "output_modalities": ["text"], + "tokenizer": "Mistral", + "instruct_type": null + }, + "pricing": { + "prompt": "0.0000004", + "completion": "0.000002", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 131072, + "max_completion_tokens": null, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "max_tokens", + "presence_penalty", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_p" + ], + "default_parameters": { + "temperature": 0.3 + } + }, + { + "id": "google/gemini-2.5-pro-preview-05-06", + "canonical_slug": "google/gemini-2.5-pro-preview-03-25", + "hugging_face_id": "", + "name": "Google: Gemini 2.5 Pro Preview 05-06", + "created": 1746578513, + "description": "Gemini 2.5 Pro is Google’s state-of-the-art AI model designed for advanced reasoning, coding, mathematics, and scientific tasks. It employs “thinking” capabilities, enabling it to reason through responses with enhanced accuracy and nuanced context handling. Gemini 2.5 Pro achieves top-tier performance on multiple benchmarks, including first-place positioning on the LMArena leaderboard, reflecting superior human-preference alignment and complex problem-solving abilities.", + "context_length": 1048576, + "architecture": { + "modality": "text+image-\u003Etext", + "input_modalities": ["text", "image", "file", "audio", "video"], + "output_modalities": ["text"], + "tokenizer": "Gemini", + "instruct_type": null + }, + "pricing": { + "prompt": "0.00000125", + "completion": "0.00001", + "request": "0", + "image": "0.00516", + "web_search": "0", + "internal_reasoning": "0", + "input_cache_read": "0.00000031", + "input_cache_write": "0.000001625" + }, + "top_provider": { + "context_length": 1048576, + "max_completion_tokens": 65535, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "include_reasoning", + "max_tokens", + "reasoning", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_p" + ], + "default_parameters": { + "temperature": null, + "top_p": null, + "frequency_penalty": null + } + }, + { + "id": "arcee-ai/spotlight", + "canonical_slug": "arcee-ai/spotlight", + "hugging_face_id": "", + "name": "Arcee AI: Spotlight", + "created": 1746481552, + "description": "Spotlight is a 7‑billion‑parameter vision‑language model derived from Qwen 2.5‑VL and fine‑tuned by Arcee AI for tight image‑text grounding tasks. It offers a 32 k‑token context window, enabling rich multimodal conversations that combine lengthy documents with one or more images. Training emphasized fast inference on consumer GPUs while retaining strong captioning, visual‐question‑answering, and diagram‑analysis accuracy. As a result, Spotlight slots neatly into agent workflows where screenshots, charts or UI mock‑ups need to be interpreted on the fly. Early benchmarks show it matching or out‑scoring larger VLMs such as LLaVA‑1.6 13 B on popular VQA and POPE alignment tests. 
", + "context_length": 131072, + "architecture": { + "modality": "text+image-\u003Etext", + "input_modalities": ["image", "text"], + "output_modalities": ["text"], + "tokenizer": "Other", + "instruct_type": null + }, + "pricing": { + "prompt": "0.00000018", + "completion": "0.00000018", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 131072, + "max_completion_tokens": 65537, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "logit_bias", + "max_tokens", + "min_p", + "presence_penalty", + "repetition_penalty", + "stop", + "temperature", + "top_k", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "arcee-ai/maestro-reasoning", + "canonical_slug": "arcee-ai/maestro-reasoning", + "hugging_face_id": "", + "name": "Arcee AI: Maestro Reasoning", + "created": 1746481269, + "description": "Maestro Reasoning is Arcee's flagship analysis model: a 32 B‑parameter derivative of Qwen 2.5‑32 B tuned with DPO and chain‑of‑thought RL for step‑by‑step logic. Compared to the earlier 7 B preview, the production 32 B release widens the context window to 128 k tokens and doubles pass‑rate on MATH and GSM‑8K, while also lifting code completion accuracy. Its instruction style encourages structured \"thought → answer\" traces that can be parsed or hidden according to user preference. That transparency pairs well with audit‑focused industries like finance or healthcare where seeing the reasoning path matters. In Arcee Conductor, Maestro is automatically selected for complex, multi‑constraint queries that smaller SLMs bounce. ", + "context_length": 131072, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Other", + "instruct_type": null + }, + "pricing": { + "prompt": "0.0000009", + "completion": "0.0000033", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 131072, + "max_completion_tokens": 32000, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "logit_bias", + "max_tokens", + "min_p", + "presence_penalty", + "repetition_penalty", + "stop", + "temperature", + "top_k", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "arcee-ai/virtuoso-large", + "canonical_slug": "arcee-ai/virtuoso-large", + "hugging_face_id": "", + "name": "Arcee AI: Virtuoso Large", + "created": 1746478885, + "description": "Virtuoso‑Large is Arcee's top‑tier general‑purpose LLM at 72 B parameters, tuned to tackle cross‑domain reasoning, creative writing and enterprise QA. Unlike many 70 B peers, it retains the 128 k context inherited from Qwen 2.5, letting it ingest books, codebases or financial filings wholesale. Training blended DeepSeek R1 distillation, multi‑epoch supervised fine‑tuning and a final DPO/RLHF alignment stage, yielding strong performance on BIG‑Bench‑Hard, GSM‑8K and long‑context Needle‑In‑Haystack tests. Enterprises use Virtuoso‑Large as the \"fallback\" brain in Conductor pipelines when other SLMs flag low confidence. 
Despite its size, aggressive KV‑cache optimizations keep first‑token latency in the low‑second range on 8× H100 nodes, making it a practical production‑grade powerhouse.", + "context_length": 131072, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Other", + "instruct_type": null + }, + "pricing": { + "prompt": "0.00000075", + "completion": "0.0000012", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 131072, + "max_completion_tokens": 64000, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "logit_bias", + "max_tokens", + "min_p", + "presence_penalty", + "repetition_penalty", + "stop", + "temperature", + "tool_choice", + "tools", + "top_k", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "arcee-ai/coder-large", + "canonical_slug": "arcee-ai/coder-large", + "hugging_face_id": "", + "name": "Arcee AI: Coder Large", + "created": 1746478663, + "description": "Coder‑Large is a 32 B‑parameter offspring of Qwen 2.5‑Instruct that has been further trained on permissively‑licensed GitHub, CodeSearchNet and synthetic bug‑fix corpora. It supports a 32k context window, enabling multi‑file refactoring or long diff review in a single call, and understands 30‑plus programming languages with special attention to TypeScript, Go and Terraform. Internal benchmarks show 5–8 pt gains over CodeLlama‑34 B‑Python on HumanEval and competitive BugFix scores thanks to a reinforcement pass that rewards compilable output. The model emits structured explanations alongside code blocks by default, making it suitable for educational tooling as well as production copilot scenarios. Cost‑wise, Together AI prices it well below proprietary incumbents, so teams can scale interactive coding without runaway spend. ", + "context_length": 32768, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Other", + "instruct_type": null + }, + "pricing": { + "prompt": "0.0000005", + "completion": "0.0000008", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 32768, + "max_completion_tokens": null, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "logit_bias", + "max_tokens", + "min_p", + "presence_penalty", + "repetition_penalty", + "stop", + "temperature", + "top_k", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "microsoft/phi-4-reasoning-plus", + "canonical_slug": "microsoft/phi-4-reasoning-plus-04-30", + "hugging_face_id": "microsoft/Phi-4-reasoning-plus", + "name": "Microsoft: Phi 4 Reasoning Plus", + "created": 1746130961, + "description": "Phi-4-reasoning-plus is an enhanced 14B parameter model from Microsoft, fine-tuned from Phi-4 with additional reinforcement learning to boost accuracy on math, science, and code reasoning tasks. It uses the same dense decoder-only transformer architecture as Phi-4, but generates longer, more comprehensive outputs structured into a step-by-step reasoning trace and final answer.\n\nWhile it offers improved benchmark scores over Phi-4-reasoning across tasks like AIME, OmniMath, and HumanEvalPlus, its responses are typically ~50% longer, resulting in higher latency. 
Designed for English-only applications, it is well-suited for structured reasoning workflows where output quality takes priority over response speed.", + "context_length": 32768, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Other", + "instruct_type": null + }, + "pricing": { + "prompt": "0.00000007", + "completion": "0.00000035", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 32768, + "max_completion_tokens": null, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "include_reasoning", + "max_tokens", + "min_p", + "presence_penalty", + "reasoning", + "repetition_penalty", + "response_format", + "seed", + "stop", + "temperature", + "top_k", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "inception/mercury-coder", + "canonical_slug": "inception/mercury-coder-small-beta", + "hugging_face_id": "", + "name": "Inception: Mercury Coder", + "created": 1746033880, + "description": "Mercury Coder is the first diffusion large language model (dLLM). Applying a breakthrough discrete diffusion approach, the model runs 5-10x faster than even speed optimized models like Claude 3.5 Haiku and GPT-4o Mini while matching their performance. Mercury Coder's speed means that developers can stay in the flow while coding, enjoying rapid chat-based iteration and responsive code completion suggestions. On Copilot Arena, Mercury Coder ranks 1st in speed and ties for 2nd in quality. Read more in the [blog post here](https://www.inceptionlabs.ai/blog/introducing-mercury).", + "context_length": 128000, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Other", + "instruct_type": null + }, + "pricing": { + "prompt": "0.00000025", + "completion": "0.000001", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 128000, + "max_completion_tokens": 16384, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "max_tokens", + "presence_penalty", + "response_format", + "stop", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_k", + "top_p" + ], + "default_parameters": { + "temperature": 0, + "top_p": null, + "frequency_penalty": null + } + }, + { + "id": "qwen/qwen3-4b:free", + "canonical_slug": "qwen/qwen3-4b-04-28", + "hugging_face_id": "Qwen/Qwen3-4B", + "name": "Qwen: Qwen3 4B (free)", + "created": 1746031104, + "description": "Qwen3-4B is a 4 billion parameter dense language model from the Qwen3 series, designed to support both general-purpose and reasoning-intensive tasks. It introduces a dual-mode architecture—thinking and non-thinking—allowing dynamic switching between high-precision logical reasoning and efficient dialogue generation. 
This makes it well-suited for multi-turn chat, instruction following, and complex agent workflows.", + "context_length": 40960, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Qwen3", + "instruct_type": "qwen3" + }, + "pricing": { + "prompt": "0", + "completion": "0", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 40960, + "max_completion_tokens": null, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "include_reasoning", + "max_tokens", + "presence_penalty", + "reasoning", + "response_format", + "stop", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_k", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "deepseek/deepseek-prover-v2", + "canonical_slug": "deepseek/deepseek-prover-v2", + "hugging_face_id": "deepseek-ai/DeepSeek-Prover-V2-671B", + "name": "DeepSeek: DeepSeek Prover V2", + "created": 1746013094, + "description": "DeepSeek Prover V2 is a 671B parameter model, speculated to be geared towards logic and mathematics. Likely an upgrade from [DeepSeek-Prover-V1.5](https://huggingface.co/deepseek-ai/DeepSeek-Prover-V1.5-RL). Not much is known about the model yet, as DeepSeek released it on Hugging Face without an announcement or description.", + "context_length": 163840, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "DeepSeek", + "instruct_type": null + }, + "pricing": { + "prompt": "0.0000005", + "completion": "0.00000218", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 163840, + "max_completion_tokens": null, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "max_tokens", + "min_p", + "presence_penalty", + "repetition_penalty", + "response_format", + "seed", + "stop", + "temperature", + "top_k", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "meta-llama/llama-guard-4-12b", + "canonical_slug": "meta-llama/llama-guard-4-12b", + "hugging_face_id": "meta-llama/Llama-Guard-4-12B", + "name": "Meta: Llama Guard 4 12B", + "created": 1745975193, + "description": "Llama Guard 4 is a Llama 4 Scout-derived multimodal pretrained model, fine-tuned for content safety classification. Similar to previous versions, it can be used to classify content in both LLM inputs (prompt classification) and in LLM responses (response classification). It acts as an LLM—generating text in its output that indicates whether a given prompt or response is safe or unsafe, and if unsafe, it also lists the content categories violated.\n\nLlama Guard 4 was aligned to safeguard against the standardized MLCommons hazards taxonomy and designed to support multimodal Llama 4 capabilities. Specifically, it combines features from previous Llama Guard models, providing content moderation for English and multiple supported languages, along with enhanced capabilities to handle mixed text-and-image prompts, including multiple images.
Additionally, Llama Guard 4 is integrated into the Llama Moderations API, extending robust safety classification to text and images.", + "context_length": 163840, + "architecture": { + "modality": "text+image-\u003Etext", + "input_modalities": ["image", "text"], + "output_modalities": ["text"], + "tokenizer": "Other", + "instruct_type": null + }, + "pricing": { + "prompt": "0.00000018", + "completion": "0.00000018", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 163840, + "max_completion_tokens": null, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "logit_bias", + "max_tokens", + "min_p", + "presence_penalty", + "repetition_penalty", + "response_format", + "seed", + "stop", + "temperature", + "top_k", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "qwen/qwen3-30b-a3b:free", + "canonical_slug": "qwen/qwen3-30b-a3b-04-28", + "hugging_face_id": "Qwen/Qwen3-30B-A3B", + "name": "Qwen: Qwen3 30B A3B (free)", + "created": 1745878604, + "description": "Qwen3, the latest generation in the Qwen large language model series, features both dense and mixture-of-experts (MoE) architectures to excel in reasoning, multilingual support, and advanced agent tasks. Its unique ability to switch seamlessly between a thinking mode for complex reasoning and a non-thinking mode for efficient dialogue ensures versatile, high-quality performance.\n\nSignificantly outperforming prior models like QwQ and Qwen2.5, Qwen3 delivers superior mathematics, coding, commonsense reasoning, creative writing, and interactive dialogue capabilities. The Qwen3-30B-A3B variant includes 30.5 billion parameters (3.3 billion activated), 48 layers, 128 experts (8 activated per task), and supports up to 131K token contexts with YaRN, setting a new standard among open-source models.", + "context_length": 40960, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Qwen3", + "instruct_type": "qwen3" + }, + "pricing": { + "prompt": "0", + "completion": "0", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 40960, + "max_completion_tokens": null, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "include_reasoning", + "max_tokens", + "presence_penalty", + "reasoning", + "repetition_penalty", + "seed", + "stop", + "temperature", + "top_k", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "qwen/qwen3-30b-a3b", + "canonical_slug": "qwen/qwen3-30b-a3b-04-28", + "hugging_face_id": "Qwen/Qwen3-30B-A3B", + "name": "Qwen: Qwen3 30B A3B", + "created": 1745878604, + "description": "Qwen3, the latest generation in the Qwen large language model series, features both dense and mixture-of-experts (MoE) architectures to excel in reasoning, multilingual support, and advanced agent tasks. Its unique ability to switch seamlessly between a thinking mode for complex reasoning and a non-thinking mode for efficient dialogue ensures versatile, high-quality performance.\n\nSignificantly outperforming prior models like QwQ and Qwen2.5, Qwen3 delivers superior mathematics, coding, commonsense reasoning, creative writing, and interactive dialogue capabilities. 
The Qwen3-30B-A3B variant includes 30.5 billion parameters (3.3 billion activated), 48 layers, 128 experts (8 activated per task), and supports up to 131K token contexts with YaRN, setting a new standard among open-source models.", + "context_length": 40960, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Qwen3", + "instruct_type": "qwen3" + }, + "pricing": { + "prompt": "0.00000006", + "completion": "0.00000022", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 40960, + "max_completion_tokens": 40960, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "include_reasoning", + "logit_bias", + "max_tokens", + "min_p", + "presence_penalty", + "reasoning", + "repetition_penalty", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_k", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "qwen/qwen3-8b", + "canonical_slug": "qwen/qwen3-8b-04-28", + "hugging_face_id": "Qwen/Qwen3-8B", + "name": "Qwen: Qwen3 8B", + "created": 1745876632, + "description": "Qwen3-8B is a dense 8.2B parameter causal language model from the Qwen3 series, designed for both reasoning-heavy tasks and efficient dialogue. It supports seamless switching between \"thinking\" mode for math, coding, and logical inference, and \"non-thinking\" mode for general conversation. The model is fine-tuned for instruction-following, agent integration, creative writing, and multilingual use across 100+ languages and dialects. It natively supports a 32K token context window and can extend to 131K tokens with YaRN scaling.", + "context_length": 128000, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Qwen3", + "instruct_type": "qwen3" + }, + "pricing": { + "prompt": "0.000000035", + "completion": "0.000000138", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 128000, + "max_completion_tokens": 20000, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "include_reasoning", + "logit_bias", + "logprobs", + "max_tokens", + "presence_penalty", + "reasoning", + "repetition_penalty", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_k", + "top_logprobs", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "qwen/qwen3-14b:free", + "canonical_slug": "qwen/qwen3-14b-04-28", + "hugging_face_id": "Qwen/Qwen3-14B", + "name": "Qwen: Qwen3 14B (free)", + "created": 1745876478, + "description": "Qwen3-14B is a dense 14.8B parameter causal language model from the Qwen3 series, designed for both complex reasoning and efficient dialogue. It supports seamless switching between a \"thinking\" mode for tasks like math, programming, and logical inference, and a \"non-thinking\" mode for general-purpose conversation. The model is fine-tuned for instruction-following, agent tool use, creative writing, and multilingual tasks across 100+ languages and dialects. 
It natively handles 32K token contexts and can extend to 131K tokens using YaRN-based scaling.", + "context_length": 40960, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Qwen3", + "instruct_type": "qwen3" + }, + "pricing": { + "prompt": "0", + "completion": "0", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 40960, + "max_completion_tokens": null, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "include_reasoning", + "max_tokens", + "presence_penalty", + "reasoning", + "repetition_penalty", + "seed", + "stop", + "temperature", + "top_k", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "qwen/qwen3-14b", + "canonical_slug": "qwen/qwen3-14b-04-28", + "hugging_face_id": "Qwen/Qwen3-14B", + "name": "Qwen: Qwen3 14B", + "created": 1745876478, + "description": "Qwen3-14B is a dense 14.8B parameter causal language model from the Qwen3 series, designed for both complex reasoning and efficient dialogue. It supports seamless switching between a \"thinking\" mode for tasks like math, programming, and logical inference, and a \"non-thinking\" mode for general-purpose conversation. The model is fine-tuned for instruction-following, agent tool use, creative writing, and multilingual tasks across 100+ languages and dialects. It natively handles 32K token contexts and can extend to 131K tokens using YaRN-based scaling.", + "context_length": 40960, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Qwen3", + "instruct_type": "qwen3" + }, + "pricing": { + "prompt": "0.00000005", + "completion": "0.00000022", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 40960, + "max_completion_tokens": 40960, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "include_reasoning", + "max_tokens", + "min_p", + "presence_penalty", + "reasoning", + "repetition_penalty", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_k", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "qwen/qwen3-32b", + "canonical_slug": "qwen/qwen3-32b-04-28", + "hugging_face_id": "Qwen/Qwen3-32B", + "name": "Qwen: Qwen3 32B", + "created": 1745875945, + "description": "Qwen3-32B is a dense 32.8B parameter causal language model from the Qwen3 series, optimized for both complex reasoning and efficient dialogue. It supports seamless switching between a \"thinking\" mode for tasks like math, coding, and logical inference, and a \"non-thinking\" mode for faster, general-purpose conversation. The model demonstrates strong performance in instruction-following, agent tool use, creative writing, and multilingual tasks across 100+ languages and dialects. It natively handles 32K token contexts and can extend to 131K tokens using YaRN-based scaling. 
", + "context_length": 40960, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Qwen3", + "instruct_type": "qwen3" + }, + "pricing": { + "prompt": "0.00000005", + "completion": "0.0000002", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 40960, + "max_completion_tokens": 40960, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "include_reasoning", + "logprobs", + "max_tokens", + "min_p", + "presence_penalty", + "reasoning", + "repetition_penalty", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_k", + "top_logprobs", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "qwen/qwen3-235b-a22b:free", + "canonical_slug": "qwen/qwen3-235b-a22b-04-28", + "hugging_face_id": "Qwen/Qwen3-235B-A22B", + "name": "Qwen: Qwen3 235B A22B (free)", + "created": 1745875757, + "description": "Qwen3-235B-A22B is a 235B parameter mixture-of-experts (MoE) model developed by Qwen, activating 22B parameters per forward pass. It supports seamless switching between a \"thinking\" mode for complex reasoning, math, and code tasks, and a \"non-thinking\" mode for general conversational efficiency. The model demonstrates strong reasoning ability, multilingual support (100+ languages and dialects), advanced instruction-following, and agent tool-calling capabilities. It natively handles a 32K token context window and extends up to 131K tokens using YaRN-based scaling.", + "context_length": 40960, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Qwen3", + "instruct_type": "qwen3" + }, + "pricing": { + "prompt": "0", + "completion": "0", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 40960, + "max_completion_tokens": null, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "include_reasoning", + "max_tokens", + "presence_penalty", + "reasoning", + "repetition_penalty", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_k", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "qwen/qwen3-235b-a22b", + "canonical_slug": "qwen/qwen3-235b-a22b-04-28", + "hugging_face_id": "Qwen/Qwen3-235B-A22B", + "name": "Qwen: Qwen3 235B A22B", + "created": 1745875757, + "description": "Qwen3-235B-A22B is a 235B parameter mixture-of-experts (MoE) model developed by Qwen, activating 22B parameters per forward pass. It supports seamless switching between a \"thinking\" mode for complex reasoning, math, and code tasks, and a \"non-thinking\" mode for general conversational efficiency. The model demonstrates strong reasoning ability, multilingual support (100+ languages and dialects), advanced instruction-following, and agent tool-calling capabilities. 
It natively handles a 32K token context window and extends up to 131K tokens using YaRN-based scaling.", + "context_length": 40960, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Qwen3", + "instruct_type": "qwen3" + }, + "pricing": { + "prompt": "0.00000018", + "completion": "0.00000054", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 40960, + "max_completion_tokens": 40960, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "include_reasoning", + "logit_bias", + "logprobs", + "max_tokens", + "min_p", + "presence_penalty", + "reasoning", + "repetition_penalty", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_k", + "top_logprobs", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "tngtech/deepseek-r1t-chimera:free", + "canonical_slug": "tngtech/deepseek-r1t-chimera", + "hugging_face_id": "tngtech/DeepSeek-R1T-Chimera", + "name": "TNG: DeepSeek R1T Chimera (free)", + "created": 1745760875, + "description": "DeepSeek-R1T-Chimera is created by merging DeepSeek-R1 and DeepSeek-V3 (0324), combining the reasoning capabilities of R1 with the token efficiency improvements of V3. It is based on a DeepSeek-MoE Transformer architecture and is optimized for general text generation tasks.\n\nThe model merges pretrained weights from both source models to balance performance across reasoning, efficiency, and instruction-following tasks. It is released under the MIT license and intended for research and commercial use.", + "context_length": 163840, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "DeepSeek", + "instruct_type": null + }, + "pricing": { + "prompt": "0", + "completion": "0", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 163840, + "max_completion_tokens": null, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "include_reasoning", + "max_tokens", + "presence_penalty", + "reasoning", + "repetition_penalty", + "seed", + "stop", + "temperature", + "top_k", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "tngtech/deepseek-r1t-chimera", + "canonical_slug": "tngtech/deepseek-r1t-chimera", + "hugging_face_id": "tngtech/DeepSeek-R1T-Chimera", + "name": "TNG: DeepSeek R1T Chimera", + "created": 1745760875, + "description": "DeepSeek-R1T-Chimera is created by merging DeepSeek-R1 and DeepSeek-V3 (0324), combining the reasoning capabilities of R1 with the token efficiency improvements of V3. It is based on a DeepSeek-MoE Transformer architecture and is optimized for general text generation tasks.\n\nThe model merges pretrained weights from both source models to balance performance across reasoning, efficiency, and instruction-following tasks. 
It is released under the MIT license and intended for research and commercial use.", + "context_length": 163840, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "DeepSeek", + "instruct_type": null + }, + "pricing": { + "prompt": "0.0000003", + "completion": "0.0000012", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 163840, + "max_completion_tokens": 163840, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "include_reasoning", + "max_tokens", + "presence_penalty", + "reasoning", + "repetition_penalty", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "top_k", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "microsoft/mai-ds-r1:free", + "canonical_slug": "microsoft/mai-ds-r1", + "hugging_face_id": "microsoft/MAI-DS-R1", + "name": "Microsoft: MAI DS R1 (free)", + "created": 1745194100, + "description": "MAI-DS-R1 is a post-trained variant of DeepSeek-R1 developed by the Microsoft AI team to improve the model’s responsiveness on previously blocked topics while enhancing its safety profile. Built on top of DeepSeek-R1’s reasoning foundation, it integrates 110k examples from the Tulu-3 SFT dataset and 350k internally curated multilingual safety-alignment samples. The model retains strong reasoning, coding, and problem-solving capabilities, while unblocking a wide range of prompts previously restricted in R1.\n\nMAI-DS-R1 demonstrates improved performance on harm mitigation benchmarks and maintains competitive results across general reasoning tasks. It surpasses R1-1776 in satisfaction metrics for blocked queries and reduces leakage in harmful content categories. The model is based on a transformer MoE architecture and is suitable for general-purpose use cases, excluding high-stakes domains such as legal, medical, or autonomous systems.", + "context_length": 163840, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "DeepSeek", + "instruct_type": "deepseek-r1" + }, + "pricing": { + "prompt": "0", + "completion": "0", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 163840, + "max_completion_tokens": null, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "include_reasoning", + "max_tokens", + "presence_penalty", + "reasoning", + "repetition_penalty", + "seed", + "stop", + "temperature", + "top_k", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "microsoft/mai-ds-r1", + "canonical_slug": "microsoft/mai-ds-r1", + "hugging_face_id": "microsoft/MAI-DS-R1", + "name": "Microsoft: MAI DS R1", + "created": 1745194100, + "description": "MAI-DS-R1 is a post-trained variant of DeepSeek-R1 developed by the Microsoft AI team to improve the model’s responsiveness on previously blocked topics while enhancing its safety profile. Built on top of DeepSeek-R1’s reasoning foundation, it integrates 110k examples from the Tulu-3 SFT dataset and 350k internally curated multilingual safety-alignment samples. 
The model retains strong reasoning, coding, and problem-solving capabilities, while unblocking a wide range of prompts previously restricted in R1.\n\nMAI-DS-R1 demonstrates improved performance on harm mitigation benchmarks and maintains competitive results across general reasoning tasks. It surpasses R1-1776 in satisfaction metrics for blocked queries and reduces leakage in harmful content categories. The model is based on a transformer MoE architecture and is suitable for general-purpose use cases, excluding high-stakes domains such as legal, medical, or autonomous systems.", + "context_length": 163840, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "DeepSeek", + "instruct_type": "deepseek-r1" + }, + "pricing": { + "prompt": "0.0000003", + "completion": "0.0000012", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 163840, + "max_completion_tokens": 163840, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "include_reasoning", + "max_tokens", + "presence_penalty", + "reasoning", + "repetition_penalty", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "top_k", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "openai/o4-mini-high", + "canonical_slug": "openai/o4-mini-high-2025-04-16", + "hugging_face_id": "", + "name": "OpenAI: o4 Mini High", + "created": 1744824212, + "description": "OpenAI o4-mini-high is the same model as [o4-mini](/openai/o4-mini) with reasoning_effort set to high. \n\nOpenAI o4-mini is a compact reasoning model in the o-series, optimized for fast, cost-efficient performance while retaining strong multimodal and agentic capabilities. It supports tool use and demonstrates competitive reasoning and coding performance across benchmarks like AIME (99.5% with Python) and SWE-bench, outperforming its predecessor o3-mini and even approaching o3 in some domains.\n\nDespite its smaller size, o4-mini exhibits high accuracy in STEM tasks, visual problem solving (e.g., MathVista, MMMU), and code editing. It is especially well-suited for high-throughput scenarios where latency or cost is critical. Thanks to its efficient architecture and refined reinforcement learning training, o4-mini can chain tools, generate structured outputs, and solve multi-step tasks with minimal delay—often in under a minute.", + "context_length": 200000, + "architecture": { + "modality": "text+image-\u003Etext", + "input_modalities": ["image", "text", "file"], + "output_modalities": ["text"], + "tokenizer": "Other", + "instruct_type": null + }, + "pricing": { + "prompt": "0.0000011", + "completion": "0.0000044", + "request": "0", + "image": "0.0008415", + "web_search": "0.01", + "internal_reasoning": "0", + "input_cache_read": "0.000000275" + }, + "top_provider": { + "context_length": 200000, + "max_completion_tokens": 100000, + "is_moderated": true + }, + "per_request_limits": null, + "supported_parameters": [ + "include_reasoning", + "max_tokens", + "reasoning", + "response_format", + "seed", + "structured_outputs", + "tool_choice", + "tools" + ], + "default_parameters": {} + }, + { + "id": "openai/o3", + "canonical_slug": "openai/o3-2025-04-16", + "hugging_face_id": "", + "name": "OpenAI: o3", + "created": 1744823457, + "description": "o3 is a well-rounded and powerful model across domains. 
It sets a new standard for math, science, coding, and visual reasoning tasks. It also excels at technical writing and instruction-following. Use it to think through multi-step problems that involve analysis across text, code, and images. ", + "context_length": 200000, + "architecture": { + "modality": "text+image-\u003Etext", + "input_modalities": ["image", "text", "file"], + "output_modalities": ["text"], + "tokenizer": "GPT", + "instruct_type": null + }, + "pricing": { + "prompt": "0.000002", + "completion": "0.000008", + "request": "0", + "image": "0.00153", + "web_search": "0.01", + "internal_reasoning": "0", + "input_cache_read": "0.0000005" + }, + "top_provider": { + "context_length": 200000, + "max_completion_tokens": 100000, + "is_moderated": true + }, + "per_request_limits": null, + "supported_parameters": [ + "include_reasoning", + "max_tokens", + "reasoning", + "response_format", + "seed", + "structured_outputs", + "tool_choice", + "tools" + ], + "default_parameters": {} + }, + { + "id": "openai/o4-mini", + "canonical_slug": "openai/o4-mini-2025-04-16", + "hugging_face_id": "", + "name": "OpenAI: o4 Mini", + "created": 1744820942, + "description": "OpenAI o4-mini is a compact reasoning model in the o-series, optimized for fast, cost-efficient performance while retaining strong multimodal and agentic capabilities. It supports tool use and demonstrates competitive reasoning and coding performance across benchmarks like AIME (99.5% with Python) and SWE-bench, outperforming its predecessor o3-mini and even approaching o3 in some domains.\n\nDespite its smaller size, o4-mini exhibits high accuracy in STEM tasks, visual problem solving (e.g., MathVista, MMMU), and code editing. It is especially well-suited for high-throughput scenarios where latency or cost is critical. Thanks to its efficient architecture and refined reinforcement learning training, o4-mini can chain tools, generate structured outputs, and solve multi-step tasks with minimal delay—often in under a minute.", + "context_length": 200000, + "architecture": { + "modality": "text+image-\u003Etext", + "input_modalities": ["image", "text", "file"], + "output_modalities": ["text"], + "tokenizer": "GPT", + "instruct_type": null + }, + "pricing": { + "prompt": "0.0000011", + "completion": "0.0000044", + "request": "0", + "image": "0.0008415", + "web_search": "0.01", + "internal_reasoning": "0", + "input_cache_read": "0.000000275" + }, + "top_provider": { + "context_length": 200000, + "max_completion_tokens": 100000, + "is_moderated": true + }, + "per_request_limits": null, + "supported_parameters": [ + "include_reasoning", + "max_tokens", + "reasoning", + "response_format", + "seed", + "structured_outputs", + "tool_choice", + "tools" + ], + "default_parameters": {} + }, + { + "id": "qwen/qwen2.5-coder-7b-instruct", + "canonical_slug": "qwen/qwen2.5-coder-7b-instruct", + "hugging_face_id": "Qwen/Qwen2.5-Coder-7B-Instruct", + "name": "Qwen: Qwen2.5 Coder 7B Instruct", + "created": 1744734887, + "description": "Qwen2.5-Coder-7B-Instruct is a 7B parameter instruction-tuned language model optimized for code-related tasks such as code generation, reasoning, and bug fixing. Based on the Qwen2.5 architecture, it incorporates enhancements like RoPE, SwiGLU, RMSNorm, and GQA attention with support for up to 128K tokens using YaRN-based extrapolation. 
It is trained on a large corpus of source code, synthetic data, and text-code grounding, providing robust performance across programming languages and agentic coding workflows.\n\nThis model is part of the Qwen2.5-Coder family and offers strong compatibility with tools like vLLM for efficient deployment. Released under the Apache 2.0 license.", + "context_length": 32768, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Qwen", + "instruct_type": null + }, + "pricing": { + "prompt": "0.00000003", + "completion": "0.00000009", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 32768, + "max_completion_tokens": null, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "max_tokens", + "presence_penalty", + "repetition_penalty", + "response_format", + "structured_outputs", + "temperature", + "top_k", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "openai/gpt-4.1", + "canonical_slug": "openai/gpt-4.1-2025-04-14", + "hugging_face_id": "", + "name": "OpenAI: GPT-4.1", + "created": 1744651385, + "description": "GPT-4.1 is a flagship large language model optimized for advanced instruction following, real-world software engineering, and long-context reasoning. It supports a 1 million token context window and outperforms GPT-4o and GPT-4.5 across coding (54.6% SWE-bench Verified), instruction compliance (87.4% IFEval), and multimodal understanding benchmarks. It is tuned for precise code diffs, agent reliability, and high recall in large document contexts, making it ideal for agents, IDE tooling, and enterprise knowledge retrieval.", + "context_length": 1047576, + "architecture": { + "modality": "text+image-\u003Etext", + "input_modalities": ["image", "text", "file"], + "output_modalities": ["text"], + "tokenizer": "GPT", + "instruct_type": null + }, + "pricing": { + "prompt": "0.000002", + "completion": "0.000008", + "request": "0", + "image": "0", + "web_search": "0.01", + "internal_reasoning": "0", + "input_cache_read": "0.0000005" + }, + "top_provider": { + "context_length": 1047576, + "max_completion_tokens": 32768, + "is_moderated": true + }, + "per_request_limits": null, + "supported_parameters": [ + "max_tokens", + "response_format", + "seed", + "structured_outputs", + "tool_choice", + "tools" + ], + "default_parameters": {} + }, + { + "id": "openai/gpt-4.1-mini", + "canonical_slug": "openai/gpt-4.1-mini-2025-04-14", + "hugging_face_id": "", + "name": "OpenAI: GPT-4.1 Mini", + "created": 1744651381, + "description": "GPT-4.1 Mini is a mid-sized model delivering performance competitive with GPT-4o at substantially lower latency and cost. It retains a 1 million token context window and scores 45.1% on hard instruction evals, 35.8% on MultiChallenge, and 84.1% on IFEval. 
Mini also shows strong coding ability (e.g., 31.6% on Aider’s polyglot diff benchmark) and vision understanding, making it suitable for interactive applications with tight performance constraints.", + "context_length": 1047576, + "architecture": { + "modality": "text+image-\u003Etext", + "input_modalities": ["image", "text", "file"], + "output_modalities": ["text"], + "tokenizer": "GPT", + "instruct_type": null + }, + "pricing": { + "prompt": "0.0000004", + "completion": "0.0000016", + "request": "0", + "image": "0", + "web_search": "0.01", + "internal_reasoning": "0", + "input_cache_read": "0.0000001" + }, + "top_provider": { + "context_length": 1047576, + "max_completion_tokens": 32768, + "is_moderated": true + }, + "per_request_limits": null, + "supported_parameters": [ + "max_tokens", + "response_format", + "seed", + "structured_outputs", + "tool_choice", + "tools" + ], + "default_parameters": {} + }, + { + "id": "openai/gpt-4.1-nano", + "canonical_slug": "openai/gpt-4.1-nano-2025-04-14", + "hugging_face_id": "", + "name": "OpenAI: GPT-4.1 Nano", + "created": 1744651369, + "description": "For tasks that demand low latency, GPT‑4.1 nano is the fastest and cheapest model in the GPT-4.1 series. It delivers exceptional performance at a small size with its 1 million token context window, and scores 80.1% on MMLU, 50.3% on GPQA, and 9.8% on Aider polyglot coding – even higher than GPT‑4o mini. It’s ideal for tasks like classification or autocompletion.", + "context_length": 1047576, + "architecture": { + "modality": "text+image-\u003Etext", + "input_modalities": ["image", "text", "file"], + "output_modalities": ["text"], + "tokenizer": "GPT", + "instruct_type": null + }, + "pricing": { + "prompt": "0.0000001", + "completion": "0.0000004", + "request": "0", + "image": "0", + "web_search": "0.01", + "internal_reasoning": "0", + "input_cache_read": "0.000000025" + }, + "top_provider": { + "context_length": 1047576, + "max_completion_tokens": 32768, + "is_moderated": true + }, + "per_request_limits": null, + "supported_parameters": [ + "max_tokens", + "response_format", + "seed", + "structured_outputs", + "tool_choice", + "tools" + ], + "default_parameters": {} + }, + { + "id": "eleutherai/llemma_7b", + "canonical_slug": "eleutherai/llemma_7b", + "hugging_face_id": "EleutherAI/llemma_7b", + "name": "EleutherAI: Llemma 7b", + "created": 1744643225, + "description": "Llemma 7B is a language model for mathematics. It was initialized with Code Llama 7B weights, and trained on the Proof-Pile-2 for 200B tokens. 
Llemma models are particularly strong at chain-of-thought mathematical reasoning and using computational tools for mathematics, such as Python and formal theorem provers.", + "context_length": 4096, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Other", + "instruct_type": "code-llama" + }, + "pricing": { + "prompt": "0.0000008", + "completion": "0.0000012", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 4096, + "max_completion_tokens": 4096, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "max_tokens", + "min_p", + "presence_penalty", + "repetition_penalty", + "seed", + "stop", + "temperature", + "top_k", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "alfredpros/codellama-7b-instruct-solidity", + "canonical_slug": "alfredpros/codellama-7b-instruct-solidity", + "hugging_face_id": "AlfredPros/CodeLlama-7b-Instruct-Solidity", + "name": "AlfredPros: CodeLLaMa 7B Instruct Solidity", + "created": 1744641874, + "description": "A 7 billion parameter Code LLaMA - Instruct model finetuned to generate Solidity smart contracts, using 4-bit QLoRA finetuning provided by the PEFT library.", + "context_length": 4096, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Other", + "instruct_type": "alpaca" + }, + "pricing": { + "prompt": "0.0000008", + "completion": "0.0000012", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 4096, + "max_completion_tokens": 4096, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "max_tokens", + "min_p", + "presence_penalty", + "repetition_penalty", + "seed", + "stop", + "temperature", + "top_k", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "arliai/qwq-32b-arliai-rpr-v1:free", + "canonical_slug": "arliai/qwq-32b-arliai-rpr-v1", + "hugging_face_id": "ArliAI/QwQ-32B-ArliAI-RpR-v1", + "name": "ArliAI: QwQ 32B RpR v1 (free)", + "created": 1744555982, + "description": "QwQ-32B-ArliAI-RpR-v1 is a 32B parameter model fine-tuned from Qwen/QwQ-32B using a curated creative writing and roleplay dataset originally developed for the RPMax series. It is designed to maintain coherence and reasoning across long multi-turn conversations by introducing explicit reasoning steps per dialogue turn, generated and refined using the base model itself.\n\nThe model was trained using RS-QLORA+ on 8K sequence lengths and supports up to 128K context windows (with practical performance around 32K). 
It is optimized for creative roleplay and dialogue generation, with an emphasis on minimizing cross-context repetition while preserving stylistic diversity.", + "context_length": 32768, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Other", + "instruct_type": "deepseek-r1" + }, + "pricing": { + "prompt": "0", + "completion": "0", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 32768, + "max_completion_tokens": null, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "include_reasoning", + "max_tokens", + "presence_penalty", + "reasoning", + "repetition_penalty", + "seed", + "stop", + "temperature", + "top_k", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "arliai/qwq-32b-arliai-rpr-v1", + "canonical_slug": "arliai/qwq-32b-arliai-rpr-v1", + "hugging_face_id": "ArliAI/QwQ-32B-ArliAI-RpR-v1", + "name": "ArliAI: QwQ 32B RpR v1", + "created": 1744555982, + "description": "QwQ-32B-ArliAI-RpR-v1 is a 32B parameter model fine-tuned from Qwen/QwQ-32B using a curated creative writing and roleplay dataset originally developed for the RPMax series. It is designed to maintain coherence and reasoning across long multi-turn conversations by introducing explicit reasoning steps per dialogue turn, generated and refined using the base model itself.\n\nThe model was trained using RS-QLORA+ on 8K sequence lengths and supports up to 128K context windows (with practical performance around 32K). It is optimized for creative roleplay and dialogue generation, with an emphasis on minimizing cross-context repetition while preserving stylistic diversity.", + "context_length": 32768, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Other", + "instruct_type": "deepseek-r1" + }, + "pricing": { + "prompt": "0.00000003", + "completion": "0.00000011", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 32768, + "max_completion_tokens": 32768, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "include_reasoning", + "max_tokens", + "presence_penalty", + "reasoning", + "repetition_penalty", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "top_k", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "agentica-org/deepcoder-14b-preview:free", + "canonical_slug": "agentica-org/deepcoder-14b-preview", + "hugging_face_id": "agentica-org/DeepCoder-14B-Preview", + "name": "Agentica: Deepcoder 14B Preview (free)", + "created": 1744555395, + "description": "DeepCoder-14B-Preview is a 14B parameter code generation model fine-tuned from DeepSeek-R1-Distill-Qwen-14B using reinforcement learning with GRPO+ and iterative context lengthening. 
It is optimized for long-context program synthesis and achieves strong performance across coding benchmarks, including 60.6% on LiveCodeBench v5, competitive with models like o3-Mini.", + "context_length": 96000, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Other", + "instruct_type": "deepseek-r1" + }, + "pricing": { + "prompt": "0", + "completion": "0", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 96000, + "max_completion_tokens": null, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "include_reasoning", + "max_tokens", + "presence_penalty", + "reasoning", + "repetition_penalty", + "seed", + "stop", + "temperature", + "top_k", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "agentica-org/deepcoder-14b-preview", + "canonical_slug": "agentica-org/deepcoder-14b-preview", + "hugging_face_id": "agentica-org/DeepCoder-14B-Preview", + "name": "Agentica: Deepcoder 14B Preview", + "created": 1744555395, + "description": "DeepCoder-14B-Preview is a 14B parameter code generation model fine-tuned from DeepSeek-R1-Distill-Qwen-14B using reinforcement learning with GRPO+ and iterative context lengthening. It is optimized for long-context program synthesis and achieves strong performance across coding benchmarks, including 60.6% on LiveCodeBench v5, competitive with models like o3-Mini.", + "context_length": 96000, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Other", + "instruct_type": "deepseek-r1" + }, + "pricing": { + "prompt": "0.000000015", + "completion": "0.000000015", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 96000, + "max_completion_tokens": null, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "include_reasoning", + "max_tokens", + "presence_penalty", + "reasoning", + "repetition_penalty", + "seed", + "stop", + "temperature", + "top_k", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "x-ai/grok-3-mini-beta", + "canonical_slug": "x-ai/grok-3-mini-beta", + "hugging_face_id": "", + "name": "xAI: Grok 3 Mini Beta", + "created": 1744240195, + "description": "Grok 3 Mini is a lightweight, smaller thinking model. Unlike traditional models that generate answers immediately, Grok 3 Mini thinks before responding. It’s ideal for reasoning-heavy tasks that don’t demand extensive domain knowledge, and shines in math-specific and quantitative use cases, such as solving challenging puzzles or math problems.\n\nTransparent \"thinking\" traces are accessible. Defaults to low reasoning; can be boosted by setting `reasoning: { effort: \"high\" }`.\n\nNote: There are two xAI endpoints for this model. By default when using this model we will always route you to the base endpoint. If you want the fast endpoint you can add `provider: { sort: throughput}` to sort by throughput instead. 
\n", + "context_length": 131072, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Grok", + "instruct_type": null + }, + "pricing": { + "prompt": "0.0000003", + "completion": "0.0000005", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0", + "input_cache_read": "0.000000075" + }, + "top_provider": { + "context_length": 131072, + "max_completion_tokens": null, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "include_reasoning", + "logprobs", + "max_tokens", + "reasoning", + "response_format", + "seed", + "stop", + "temperature", + "tool_choice", + "tools", + "top_logprobs", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "x-ai/grok-3-beta", + "canonical_slug": "x-ai/grok-3-beta", + "hugging_face_id": "", + "name": "xAI: Grok 3 Beta", + "created": 1744240068, + "description": "Grok 3 is the latest model from xAI. It's their flagship model that excels at enterprise use cases like data extraction, coding, and text summarization. Possesses deep domain knowledge in finance, healthcare, law, and science.\n\nExcels in structured tasks and benchmarks like GPQA, LCB, and MMLU-Pro where it outperforms Grok 3 Mini even on high thinking. \n\nNote: That there are two xAI endpoints for this model. By default when using this model we will always route you to the base endpoint. If you want the fast endpoint you can add `provider: { sort: throughput}`, to sort by throughput instead. \n", + "context_length": 131072, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Grok", + "instruct_type": null + }, + "pricing": { + "prompt": "0.000003", + "completion": "0.000015", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0", + "input_cache_read": "0.00000075" + }, + "top_provider": { + "context_length": 131072, + "max_completion_tokens": null, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "logprobs", + "max_tokens", + "presence_penalty", + "response_format", + "seed", + "stop", + "temperature", + "tool_choice", + "tools", + "top_logprobs", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "nvidia/llama-3.1-nemotron-ultra-253b-v1", + "canonical_slug": "nvidia/llama-3.1-nemotron-ultra-253b-v1", + "hugging_face_id": "nvidia/Llama-3_1-Nemotron-Ultra-253B-v1", + "name": "NVIDIA: Llama 3.1 Nemotron Ultra 253B v1", + "created": 1744115059, + "description": "Llama-3.1-Nemotron-Ultra-253B-v1 is a large language model (LLM) optimized for advanced reasoning, human-interactive chat, retrieval-augmented generation (RAG), and tool-calling tasks. Derived from Meta’s Llama-3.1-405B-Instruct, it has been significantly customized using Neural Architecture Search (NAS), resulting in enhanced efficiency, reduced memory usage, and improved inference latency. The model supports a context length of up to 128K tokens and can operate efficiently on an 8x NVIDIA H100 node.\n\nNote: you must include `detailed thinking on` in the system prompt to enable reasoning. 
Please see [Usage Recommendations](https://huggingface.co/nvidia/Llama-3_1-Nemotron-Ultra-253B-v1#quick-start-and-usage-recommendations) for more.", + "context_length": 131072, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Llama3", + "instruct_type": null + }, + "pricing": { + "prompt": "0.0000006", + "completion": "0.0000018", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 131072, + "max_completion_tokens": null, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "include_reasoning", + "max_tokens", + "presence_penalty", + "reasoning", + "repetition_penalty", + "response_format", + "structured_outputs", + "temperature", + "top_k", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "meta-llama/llama-4-maverick:free", + "canonical_slug": "meta-llama/llama-4-maverick-17b-128e-instruct", + "hugging_face_id": "meta-llama/Llama-4-Maverick-17B-128E-Instruct", + "name": "Meta: Llama 4 Maverick (free)", + "created": 1743881822, + "description": "Llama 4 Maverick 17B Instruct (128E) is a high-capacity multimodal language model from Meta, built on a mixture-of-experts (MoE) architecture with 128 experts and 17 billion active parameters per forward pass (400B total). It supports multilingual text and image input, and produces multilingual text and code output across 12 supported languages. Optimized for vision-language tasks, Maverick is instruction-tuned for assistant-like behavior, image reasoning, and general-purpose multimodal interaction.\n\nMaverick features early fusion for native multimodality and a 1 million token context window. It was trained on a curated mixture of public, licensed, and Meta-platform data, covering ~22 trillion tokens, with a knowledge cutoff in August 2024. Released on April 5, 2025 under the Llama 4 Community License, Maverick is suited for research and commercial applications requiring advanced multimodal understanding and high model throughput.", + "context_length": 128000, + "architecture": { + "modality": "text+image-\u003Etext", + "input_modalities": ["text", "image"], + "output_modalities": ["text"], + "tokenizer": "Llama4", + "instruct_type": null + }, + "pricing": { + "prompt": "0", + "completion": "0", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 128000, + "max_completion_tokens": 4028, + "is_moderated": true + }, + "per_request_limits": null, + "supported_parameters": [ + "max_tokens", + "repetition_penalty", + "response_format", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_k", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "meta-llama/llama-4-maverick", + "canonical_slug": "meta-llama/llama-4-maverick-17b-128e-instruct", + "hugging_face_id": "meta-llama/Llama-4-Maverick-17B-128E-Instruct", + "name": "Meta: Llama 4 Maverick", + "created": 1743881822, + "description": "Llama 4 Maverick 17B Instruct (128E) is a high-capacity multimodal language model from Meta, built on a mixture-of-experts (MoE) architecture with 128 experts and 17 billion active parameters per forward pass (400B total). It supports multilingual text and image input, and produces multilingual text and code output across 12 supported languages. 
Optimized for vision-language tasks, Maverick is instruction-tuned for assistant-like behavior, image reasoning, and general-purpose multimodal interaction.\n\nMaverick features early fusion for native multimodality and a 1 million token context window. It was trained on a curated mixture of public, licensed, and Meta-platform data, covering ~22 trillion tokens, with a knowledge cutoff in August 2024. Released on April 5, 2025 under the Llama 4 Community License, Maverick is suited for research and commercial applications requiring advanced multimodal understanding and high model throughput.", + "context_length": 1048576, + "architecture": { + "modality": "text+image-\u003Etext", + "input_modalities": ["text", "image"], + "output_modalities": ["text"], + "tokenizer": "Llama4", + "instruct_type": null + }, + "pricing": { + "prompt": "0.00000015", + "completion": "0.0000006", + "request": "0", + "image": "0.0006684", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 1048576, + "max_completion_tokens": 16384, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "logit_bias", + "max_tokens", + "min_p", + "presence_penalty", + "repetition_penalty", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_k", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "meta-llama/llama-4-scout:free", + "canonical_slug": "meta-llama/llama-4-scout-17b-16e-instruct", + "hugging_face_id": "meta-llama/Llama-4-Scout-17B-16E-Instruct", + "name": "Meta: Llama 4 Scout (free)", + "created": 1743881519, + "description": "Llama 4 Scout 17B Instruct (16E) is a mixture-of-experts (MoE) language model developed by Meta, activating 17 billion parameters out of a total of 109B. It supports native multimodal input (text and image) and multilingual output (text and code) across 12 supported languages. Designed for assistant-style interaction and visual reasoning, Scout uses 16 experts per forward pass and features a context length of 10 million tokens, with a training corpus of ~40 trillion tokens.\n\nBuilt for high efficiency and local or commercial deployment, Llama 4 Scout incorporates early fusion for seamless modality integration. It is instruction-tuned for use in multilingual chat, captioning, and image understanding tasks. 
Released under the Llama 4 Community License, it was last trained on data up to August 2024 and launched publicly on April 5, 2025.", + "context_length": 128000, + "architecture": { + "modality": "text+image-\u003Etext", + "input_modalities": ["text", "image"], + "output_modalities": ["text"], + "tokenizer": "Llama4", + "instruct_type": null + }, + "pricing": { + "prompt": "0", + "completion": "0", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 128000, + "max_completion_tokens": 4028, + "is_moderated": true + }, + "per_request_limits": null, + "supported_parameters": [ + "max_tokens", + "repetition_penalty", + "response_format", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_k", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "meta-llama/llama-4-scout", + "canonical_slug": "meta-llama/llama-4-scout-17b-16e-instruct", + "hugging_face_id": "meta-llama/Llama-4-Scout-17B-16E-Instruct", + "name": "Meta: Llama 4 Scout", + "created": 1743881519, + "description": "Llama 4 Scout 17B Instruct (16E) is a mixture-of-experts (MoE) language model developed by Meta, activating 17 billion parameters out of a total of 109B. It supports native multimodal input (text and image) and multilingual output (text and code) across 12 supported languages. Designed for assistant-style interaction and visual reasoning, Scout uses 16 experts per forward pass and features a context length of 10 million tokens, with a training corpus of ~40 trillion tokens.\n\nBuilt for high efficiency and local or commercial deployment, Llama 4 Scout incorporates early fusion for seamless modality integration. It is instruction-tuned for use in multilingual chat, captioning, and image understanding tasks. Released under the Llama 4 Community License, it was last trained on data up to August 2024 and launched publicly on April 5, 2025.", + "context_length": 327680, + "architecture": { + "modality": "text+image-\u003Etext", + "input_modalities": ["text", "image"], + "output_modalities": ["text"], + "tokenizer": "Llama4", + "instruct_type": null + }, + "pricing": { + "prompt": "0.00000008", + "completion": "0.0000003", + "request": "0", + "image": "0.0003342", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 327680, + "max_completion_tokens": 16384, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "logit_bias", + "max_tokens", + "min_p", + "presence_penalty", + "repetition_penalty", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_k", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "qwen/qwen2.5-vl-32b-instruct:free", + "canonical_slug": "qwen/qwen2.5-vl-32b-instruct", + "hugging_face_id": "Qwen/Qwen2.5-VL-32B-Instruct", + "name": "Qwen: Qwen2.5 VL 32B Instruct (free)", + "created": 1742839838, + "description": "Qwen2.5-VL-32B is a multimodal vision-language model fine-tuned through reinforcement learning for enhanced mathematical reasoning, structured outputs, and visual problem-solving capabilities. It excels at visual analysis tasks, including object recognition, textual interpretation within images, and precise event localization in extended videos. 
Qwen2.5-VL-32B demonstrates state-of-the-art performance across multimodal benchmarks such as MMMU, MathVista, and VideoMME, while maintaining strong reasoning and clarity in text-based tasks like MMLU, mathematical problem-solving, and code generation.", + "context_length": 16384, + "architecture": { + "modality": "text+image-\u003Etext", + "input_modalities": ["text", "image"], + "output_modalities": ["text"], + "tokenizer": "Qwen", + "instruct_type": null + }, + "pricing": { + "prompt": "0", + "completion": "0", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 16384, + "max_completion_tokens": null, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "max_tokens", + "presence_penalty", + "repetition_penalty", + "seed", + "stop", + "temperature", + "top_k", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "qwen/qwen2.5-vl-32b-instruct", + "canonical_slug": "qwen/qwen2.5-vl-32b-instruct", + "hugging_face_id": "Qwen/Qwen2.5-VL-32B-Instruct", + "name": "Qwen: Qwen2.5 VL 32B Instruct", + "created": 1742839838, + "description": "Qwen2.5-VL-32B is a multimodal vision-language model fine-tuned through reinforcement learning for enhanced mathematical reasoning, structured outputs, and visual problem-solving capabilities. It excels at visual analysis tasks, including object recognition, textual interpretation within images, and precise event localization in extended videos. Qwen2.5-VL-32B demonstrates state-of-the-art performance across multimodal benchmarks such as MMMU, MathVista, and VideoMME, while maintaining strong reasoning and clarity in text-based tasks like MMLU, mathematical problem-solving, and code generation.", + "context_length": 16384, + "architecture": { + "modality": "text+image-\u003Etext", + "input_modalities": ["text", "image"], + "output_modalities": ["text"], + "tokenizer": "Qwen", + "instruct_type": null + }, + "pricing": { + "prompt": "0.00000005", + "completion": "0.00000022", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 16384, + "max_completion_tokens": 16384, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "logit_bias", + "logprobs", + "max_tokens", + "min_p", + "presence_penalty", + "repetition_penalty", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "top_k", + "top_logprobs", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "deepseek/deepseek-chat-v3-0324:free", + "canonical_slug": "deepseek/deepseek-chat-v3-0324", + "hugging_face_id": "deepseek-ai/DeepSeek-V3-0324", + "name": "DeepSeek: DeepSeek V3 0324 (free)", + "created": 1742824755, + "description": "DeepSeek V3, a 685B-parameter, mixture-of-experts model, is the latest iteration of the flagship chat model family from the DeepSeek team.\n\nIt succeeds the [DeepSeek V3](/deepseek/deepseek-chat-v3) model and performs really well on a variety of tasks.", + "context_length": 163840, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "DeepSeek", + "instruct_type": null + }, + "pricing": { + "prompt": "0", + "completion": "0", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 163840, + "max_completion_tokens": null, + 
"is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "max_tokens", + "presence_penalty", + "repetition_penalty", + "seed", + "stop", + "temperature", + "tool_choice", + "tools", + "top_k", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "deepseek/deepseek-chat-v3-0324", + "canonical_slug": "deepseek/deepseek-chat-v3-0324", + "hugging_face_id": "deepseek-ai/DeepSeek-V3-0324", + "name": "DeepSeek: DeepSeek V3 0324", + "created": 1742824755, + "description": "DeepSeek V3, a 685B-parameter, mixture-of-experts model, is the latest iteration of the flagship chat model family from the DeepSeek team.\n\nIt succeeds the [DeepSeek V3](/deepseek/deepseek-chat-v3) model and performs really well on a variety of tasks.", + "context_length": 163840, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "DeepSeek", + "instruct_type": null + }, + "pricing": { + "prompt": "0.00000024", + "completion": "0.00000084", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 163840, + "max_completion_tokens": 163840, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "logit_bias", + "logprobs", + "max_tokens", + "min_p", + "presence_penalty", + "reasoning", + "repetition_penalty", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_k", + "top_logprobs", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "openai/o1-pro", + "canonical_slug": "openai/o1-pro", + "hugging_face_id": "", + "name": "OpenAI: o1-pro", + "created": 1742423211, + "description": "The o1 series of models are trained with reinforcement learning to think before they answer and perform complex reasoning. The o1-pro model uses more compute to think harder and provide consistently better answers.", + "context_length": 200000, + "architecture": { + "modality": "text+image-\u003Etext", + "input_modalities": ["text", "image", "file"], + "output_modalities": ["text"], + "tokenizer": "GPT", + "instruct_type": null + }, + "pricing": { + "prompt": "0.00015", + "completion": "0.0006", + "request": "0", + "image": "0.21675", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 200000, + "max_completion_tokens": 100000, + "is_moderated": true + }, + "per_request_limits": null, + "supported_parameters": [ + "include_reasoning", + "max_tokens", + "reasoning", + "response_format", + "seed", + "structured_outputs" + ], + "default_parameters": {} + }, + { + "id": "mistralai/mistral-small-3.1-24b-instruct:free", + "canonical_slug": "mistralai/mistral-small-3.1-24b-instruct-2503", + "hugging_face_id": "mistralai/Mistral-Small-3.1-24B-Instruct-2503", + "name": "Mistral: Mistral Small 3.1 24B (free)", + "created": 1742238937, + "description": "Mistral Small 3.1 24B Instruct is an upgraded variant of Mistral Small 3 (2501), featuring 24 billion parameters with advanced multimodal capabilities. It provides state-of-the-art performance in text-based reasoning and vision tasks, including image analysis, programming, mathematical reasoning, and multilingual support across dozens of languages. 
Equipped with an extensive 128k token context window and optimized for efficient local inference, it supports use cases such as conversational agents, function calling, long-document comprehension, and privacy-sensitive deployments. The updated version is [Mistral Small 3.2](mistralai/mistral-small-3.2-24b-instruct)", + "context_length": 96000, + "architecture": { + "modality": "text+image-\u003Etext", + "input_modalities": ["text", "image"], + "output_modalities": ["text"], + "tokenizer": "Mistral", + "instruct_type": null + }, + "pricing": { + "prompt": "0", + "completion": "0", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 96000, + "max_completion_tokens": 96000, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "max_tokens", + "presence_penalty", + "repetition_penalty", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_k", + "top_p" + ], + "default_parameters": { + "temperature": 0.3 + } + }, + { + "id": "mistralai/mistral-small-3.1-24b-instruct", + "canonical_slug": "mistralai/mistral-small-3.1-24b-instruct-2503", + "hugging_face_id": "mistralai/Mistral-Small-3.1-24B-Instruct-2503", + "name": "Mistral: Mistral Small 3.1 24B", + "created": 1742238937, + "description": "Mistral Small 3.1 24B Instruct is an upgraded variant of Mistral Small 3 (2501), featuring 24 billion parameters with advanced multimodal capabilities. It provides state-of-the-art performance in text-based reasoning and vision tasks, including image analysis, programming, mathematical reasoning, and multilingual support across dozens of languages. Equipped with an extensive 128k token context window and optimized for efficient local inference, it supports use cases such as conversational agents, function calling, long-document comprehension, and privacy-sensitive deployments. The updated version is [Mistral Small 3.2](mistralai/mistral-small-3.2-24b-instruct)", + "context_length": 131072, + "architecture": { + "modality": "text+image-\u003Etext", + "input_modalities": ["text", "image"], + "output_modalities": ["text"], + "tokenizer": "Mistral", + "instruct_type": null + }, + "pricing": { + "prompt": "0.00000005", + "completion": "0.00000022", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 131072, + "max_completion_tokens": 131072, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "max_tokens", + "presence_penalty", + "repetition_penalty", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_k", + "top_p" + ], + "default_parameters": { + "temperature": 0.3 + } + }, + { + "id": "allenai/olmo-2-0325-32b-instruct", + "canonical_slug": "allenai/olmo-2-0325-32b-instruct", + "hugging_face_id": "allenai/OLMo-2-0325-32B-Instruct", + "name": "AllenAI: Olmo 2 32B Instruct", + "created": 1741988556, + "description": "OLMo-2 32B Instruct is a supervised instruction-finetuned variant of the OLMo-2 32B March 2025 base model. It excels in complex reasoning and instruction-following tasks across diverse benchmarks such as GSM8K, MATH, IFEval, and general NLP evaluation. 
Developed by AI2, OLMo-2 32B is part of an open, research-oriented initiative, trained primarily on English-language datasets to advance the understanding and development of open-source language models.", + "context_length": 4096, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Other", + "instruct_type": null + }, + "pricing": { + "prompt": "0.0000002", + "completion": "0.00000035", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 4096, + "max_completion_tokens": 4096, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "logit_bias", + "max_tokens", + "min_p", + "presence_penalty", + "repetition_penalty", + "seed", + "stop", + "temperature", + "top_k", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "google/gemma-3-4b-it:free", + "canonical_slug": "google/gemma-3-4b-it", + "hugging_face_id": "google/gemma-3-4b-it", + "name": "Google: Gemma 3 4B (free)", + "created": 1741905510, + "description": "Gemma 3 introduces multimodality, supporting vision-language input and text outputs. It handles context windows up to 128k tokens, understands over 140 languages, and offers improved math, reasoning, and chat capabilities, including structured outputs and function calling.", + "context_length": 32768, + "architecture": { + "modality": "text+image-\u003Etext", + "input_modalities": ["text", "image"], + "output_modalities": ["text"], + "tokenizer": "Gemini", + "instruct_type": "gemma" + }, + "pricing": { + "prompt": "0", + "completion": "0", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 32768, + "max_completion_tokens": 8192, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "max_tokens", + "response_format", + "seed", + "structured_outputs", + "temperature", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "google/gemma-3-4b-it", + "canonical_slug": "google/gemma-3-4b-it", + "hugging_face_id": "google/gemma-3-4b-it", + "name": "Google: Gemma 3 4B", + "created": 1741905510, + "description": "Gemma 3 introduces multimodality, supporting vision-language input and text outputs. 
It handles context windows up to 128k tokens, understands over 140 languages, and offers improved math, reasoning, and chat capabilities, including structured outputs and function calling.", + "context_length": 96000, + "architecture": { + "modality": "text+image-\u003Etext", + "input_modalities": ["text", "image"], + "output_modalities": ["text"], + "tokenizer": "Gemini", + "instruct_type": "gemma" + }, + "pricing": { + "prompt": "0.00000001703012", + "completion": "0.0000000681536", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 96000, + "max_completion_tokens": null, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "max_tokens", + "min_p", + "presence_penalty", + "repetition_penalty", + "response_format", + "seed", + "stop", + "temperature", + "top_k", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "google/gemma-3-12b-it:free", + "canonical_slug": "google/gemma-3-12b-it", + "hugging_face_id": "google/gemma-3-12b-it", + "name": "Google: Gemma 3 12B (free)", + "created": 1741902625, + "description": "Gemma 3 introduces multimodality, supporting vision-language input and text outputs. It handles context windows up to 128k tokens, understands over 140 languages, and offers improved math, reasoning, and chat capabilities, including structured outputs and function calling. Gemma 3 12B is the second largest in the family of Gemma 3 models after [Gemma 3 27B](google/gemma-3-27b-it)", + "context_length": 32768, + "architecture": { + "modality": "text+image-\u003Etext", + "input_modalities": ["text", "image"], + "output_modalities": ["text"], + "tokenizer": "Gemini", + "instruct_type": "gemma" + }, + "pricing": { + "prompt": "0", + "completion": "0", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 32768, + "max_completion_tokens": 8192, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": ["max_tokens", "seed", "temperature", "top_p"], + "default_parameters": {} + }, + { + "id": "google/gemma-3-12b-it", + "canonical_slug": "google/gemma-3-12b-it", + "hugging_face_id": "google/gemma-3-12b-it", + "name": "Google: Gemma 3 12B", + "created": 1741902625, + "description": "Gemma 3 introduces multimodality, supporting vision-language input and text outputs. It handles context windows up to 128k tokens, understands over 140 languages, and offers improved math, reasoning, and chat capabilities, including structured outputs and function calling. 
Gemma 3 12B is the second largest in the family of Gemma 3 models after [Gemma 3 27B](google/gemma-3-27b-it)", + "context_length": 131072, + "architecture": { + "modality": "text+image-\u003Etext", + "input_modalities": ["text", "image"], + "output_modalities": ["text"], + "tokenizer": "Gemini", + "instruct_type": "gemma" + }, + "pricing": { + "prompt": "0.00000003", + "completion": "0.0000001", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 131072, + "max_completion_tokens": 131072, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "logit_bias", + "max_tokens", + "min_p", + "presence_penalty", + "repetition_penalty", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "top_k", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "cohere/command-a", + "canonical_slug": "cohere/command-a-03-2025", + "hugging_face_id": "CohereForAI/c4ai-command-a-03-2025", + "name": "Cohere: Command A", + "created": 1741894342, + "description": "Command A is an open-weights 111B parameter model with a 256k context window focused on delivering great performance across agentic, multilingual, and coding use cases.\nCompared to other leading proprietary and open-weights models, Command A delivers maximum performance with minimum hardware costs, excelling on business-critical agentic and multilingual tasks.", + "context_length": 256000, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Other", + "instruct_type": null + }, + "pricing": { + "prompt": "0.0000025", + "completion": "0.00001", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 256000, + "max_completion_tokens": 8192, + "is_moderated": true + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "max_tokens", + "presence_penalty", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "top_k", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "openai/gpt-4o-mini-search-preview", + "canonical_slug": "openai/gpt-4o-mini-search-preview-2025-03-11", + "hugging_face_id": "", + "name": "OpenAI: GPT-4o-mini Search Preview", + "created": 1741818122, + "description": "GPT-4o mini Search Preview is a specialized model for web search in Chat Completions. It is trained to understand and execute web search queries.", + "context_length": 128000, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "GPT", + "instruct_type": null + }, + "pricing": { + "prompt": "0.00000015", + "completion": "0.0000006", + "request": "0.0275", + "image": "0.000217", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 128000, + "max_completion_tokens": 16384, + "is_moderated": true + }, + "per_request_limits": null, + "supported_parameters": [ + "max_tokens", + "response_format", + "structured_outputs", + "web_search_options" + ], + "default_parameters": {} + }, + { + "id": "openai/gpt-4o-search-preview", + "canonical_slug": "openai/gpt-4o-search-preview-2025-03-11", + "hugging_face_id": "", + "name": "OpenAI: GPT-4o Search Preview", + "created": 1741817949, + "description": "GPT-4o Search Preview is a specialized model for web search in Chat Completions. 
It is trained to understand and execute web search queries.", + "context_length": 128000, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "GPT", + "instruct_type": null + }, + "pricing": { + "prompt": "0.0000025", + "completion": "0.00001", + "request": "0.035", + "image": "0.003613", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 128000, + "max_completion_tokens": 16384, + "is_moderated": true + }, + "per_request_limits": null, + "supported_parameters": [ + "max_tokens", + "response_format", + "structured_outputs", + "web_search_options" + ], + "default_parameters": {} + }, + { + "id": "google/gemma-3-27b-it:free", + "canonical_slug": "google/gemma-3-27b-it", + "hugging_face_id": "", + "name": "Google: Gemma 3 27B (free)", + "created": 1741756359, + "description": "Gemma 3 introduces multimodality, supporting vision-language input and text outputs. It handles context windows up to 128k tokens, understands over 140 languages, and offers improved math, reasoning, and chat capabilities, including structured outputs and function calling. Gemma 3 27B is Google's latest open source model, successor to [Gemma 2](google/gemma-2-27b-it)", + "context_length": 131072, + "architecture": { + "modality": "text+image-\u003Etext", + "input_modalities": ["text", "image"], + "output_modalities": ["text"], + "tokenizer": "Gemini", + "instruct_type": "gemma" + }, + "pricing": { + "prompt": "0", + "completion": "0", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 131072, + "max_completion_tokens": null, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "max_tokens", + "presence_penalty", + "repetition_penalty", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "top_k", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "google/gemma-3-27b-it", + "canonical_slug": "google/gemma-3-27b-it", + "hugging_face_id": "", + "name": "Google: Gemma 3 27B", + "created": 1741756359, + "description": "Gemma 3 introduces multimodality, supporting vision-language input and text outputs. It handles context windows up to 128k tokens, understands over 140 languages, and offers improved math, reasoning, and chat capabilities, including structured outputs and function calling. 
Gemma 3 27B is Google's latest open source model, successor to [Gemma 2](google/gemma-2-27b-it)", + "context_length": 131072, + "architecture": { + "modality": "text+image-\u003Etext", + "input_modalities": ["text", "image"], + "output_modalities": ["text"], + "tokenizer": "Gemini", + "instruct_type": "gemma" + }, + "pricing": { + "prompt": "0.00000009", + "completion": "0.00000016", + "request": "0", + "image": "0.0000256", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 131072, + "max_completion_tokens": 16384, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "logit_bias", + "max_tokens", + "min_p", + "presence_penalty", + "repetition_penalty", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_k", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "thedrummer/skyfall-36b-v2", + "canonical_slug": "thedrummer/skyfall-36b-v2", + "hugging_face_id": "TheDrummer/Skyfall-36B-v2", + "name": "TheDrummer: Skyfall 36B V2", + "created": 1741636566, + "description": "Skyfall 36B v2 is an enhanced iteration of Mistral Small 2501, specifically fine-tuned for improved creativity, nuanced writing, role-playing, and coherent storytelling.", + "context_length": 32768, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Other", + "instruct_type": null + }, + "pricing": { + "prompt": "0.0000005", + "completion": "0.0000008", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 32768, + "max_completion_tokens": 32768, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "logit_bias", + "max_tokens", + "min_p", + "presence_penalty", + "repetition_penalty", + "seed", + "stop", + "temperature", + "top_k", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "microsoft/phi-4-multimodal-instruct", + "canonical_slug": "microsoft/phi-4-multimodal-instruct", + "hugging_face_id": "microsoft/Phi-4-multimodal-instruct", + "name": "Microsoft: Phi 4 Multimodal Instruct", + "created": 1741396284, + "description": "Phi-4 Multimodal Instruct is a versatile 5.6B parameter foundation model that combines advanced reasoning and instruction-following capabilities across both text and visual inputs, providing accurate text outputs. The unified architecture enables efficient, low-latency inference, suitable for edge and mobile deployments. Phi-4 Multimodal Instruct supports text inputs in multiple languages including Arabic, Chinese, English, French, German, Japanese, Spanish, and more, with visual input optimized primarily for English. It delivers impressive performance on multimodal tasks involving mathematical, scientific, and document reasoning, providing developers and enterprises a powerful yet compact model for sophisticated interactive applications. 
For more information, see the [Phi-4 Multimodal blog post](https://azure.microsoft.com/en-us/blog/empowering-innovation-the-next-generation-of-the-phi-family/).\n", + "context_length": 131072, + "architecture": { + "modality": "text+image-\u003Etext", + "input_modalities": ["text", "image"], + "output_modalities": ["text"], + "tokenizer": "Other", + "instruct_type": null + }, + "pricing": { + "prompt": "0.00000005", + "completion": "0.0000001", + "request": "0", + "image": "0.00017685", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 131072, + "max_completion_tokens": null, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "max_tokens", + "min_p", + "presence_penalty", + "repetition_penalty", + "response_format", + "seed", + "stop", + "temperature", + "top_k", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "perplexity/sonar-reasoning-pro", + "canonical_slug": "perplexity/sonar-reasoning-pro", + "hugging_face_id": "", + "name": "Perplexity: Sonar Reasoning Pro", + "created": 1741313308, + "description": "Note: Sonar Pro pricing includes Perplexity search pricing. See [details here](https://docs.perplexity.ai/guides/pricing#detailed-pricing-breakdown-for-sonar-reasoning-pro-and-sonar-pro)\n\nSonar Reasoning Pro is a premier reasoning model powered by DeepSeek R1 with Chain of Thought (CoT). Designed for advanced use cases, it supports in-depth, multi-step queries with a larger context window and can surface more citations per search, enabling more comprehensive and extensible responses.", + "context_length": 128000, + "architecture": { + "modality": "text+image-\u003Etext", + "input_modalities": ["text", "image"], + "output_modalities": ["text"], + "tokenizer": "Other", + "instruct_type": "deepseek-r1" + }, + "pricing": { + "prompt": "0.000002", + "completion": "0.000008", + "request": "0", + "image": "0", + "web_search": "0.005", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 128000, + "max_completion_tokens": null, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "include_reasoning", + "max_tokens", + "presence_penalty", + "reasoning", + "temperature", + "top_k", + "top_p", + "web_search_options" + ], + "default_parameters": {} + }, + { + "id": "perplexity/sonar-pro", + "canonical_slug": "perplexity/sonar-pro", + "hugging_face_id": "", + "name": "Perplexity: Sonar Pro", + "created": 1741312423, + "description": "Note: Sonar Pro pricing includes Perplexity search pricing. See [details here](https://docs.perplexity.ai/guides/pricing#detailed-pricing-breakdown-for-sonar-reasoning-pro-and-sonar-pro)\n\nFor enterprises seeking more advanced capabilities, the Sonar Pro API can handle in-depth, multi-step queries with added extensibility, such as double the average number of citations per search compared to Sonar. Plus, with a larger context window, it can handle longer and more nuanced searches and follow-up questions. 
", + "context_length": 200000, + "architecture": { + "modality": "text+image-\u003Etext", + "input_modalities": ["text", "image"], + "output_modalities": ["text"], + "tokenizer": "Other", + "instruct_type": null + }, + "pricing": { + "prompt": "0.000003", + "completion": "0.000015", + "request": "0", + "image": "0", + "web_search": "0.005", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 200000, + "max_completion_tokens": 8000, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "max_tokens", + "presence_penalty", + "temperature", + "top_k", + "top_p", + "web_search_options" + ], + "default_parameters": {} + }, + { + "id": "perplexity/sonar-deep-research", + "canonical_slug": "perplexity/sonar-deep-research", + "hugging_face_id": "", + "name": "Perplexity: Sonar Deep Research", + "created": 1741311246, + "description": "Sonar Deep Research is a research-focused model designed for multi-step retrieval, synthesis, and reasoning across complex topics. It autonomously searches, reads, and evaluates sources, refining its approach as it gathers information. This enables comprehensive report generation across domains like finance, technology, health, and current events.\n\nNotes on Pricing ([Source](https://docs.perplexity.ai/guides/pricing#detailed-pricing-breakdown-for-sonar-deep-research)) \n- Input tokens comprise of Prompt tokens (user prompt) + Citation tokens (these are processed tokens from running searches)\n- Deep Research runs multiple searches to conduct exhaustive research. Searches are priced at $5/1000 searches. A request that does 30 searches will cost $0.15 in this step.\n- Reasoning is a distinct step in Deep Research since it does extensive automated reasoning through all the material it gathers during its research phase. Reasoning tokens here are a bit different than the CoTs in the answer - these are tokens that we use to reason through the research material prior to generating the outputs via the CoTs. Reasoning tokens are priced at $3/1M tokens", + "context_length": 128000, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Other", + "instruct_type": "deepseek-r1" + }, + "pricing": { + "prompt": "0.000002", + "completion": "0.000008", + "request": "0", + "image": "0", + "web_search": "0.005", + "internal_reasoning": "0.000003" + }, + "top_provider": { + "context_length": 128000, + "max_completion_tokens": null, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "include_reasoning", + "max_tokens", + "presence_penalty", + "reasoning", + "temperature", + "top_k", + "top_p", + "web_search_options" + ], + "default_parameters": {} + }, + { + "id": "qwen/qwq-32b", + "canonical_slug": "qwen/qwq-32b", + "hugging_face_id": "Qwen/QwQ-32B", + "name": "Qwen: QwQ 32B", + "created": 1741208814, + "description": "QwQ is the reasoning model of the Qwen series. Compared with conventional instruction-tuned models, QwQ, which is capable of thinking and reasoning, can achieve significantly enhanced performance in downstream tasks, especially hard problems. 
QwQ-32B is the medium-sized reasoning model, which is capable of achieving competitive performance against state-of-the-art reasoning models, e.g., DeepSeek-R1, o1-mini.", + "context_length": 32768, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Qwen", + "instruct_type": "qwq" + }, + "pricing": { + "prompt": "0.00000015", + "completion": "0.0000004", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 32768, + "max_completion_tokens": null, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "include_reasoning", + "logit_bias", + "max_tokens", + "min_p", + "presence_penalty", + "reasoning", + "repetition_penalty", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_k", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "google/gemini-2.0-flash-lite-001", + "canonical_slug": "google/gemini-2.0-flash-lite-001", + "hugging_face_id": "", + "name": "Google: Gemini 2.0 Flash Lite", + "created": 1740506212, + "description": "Gemini 2.0 Flash Lite offers a significantly faster time to first token (TTFT) compared to [Gemini Flash 1.5](/google/gemini-flash-1.5), while maintaining quality on par with larger models like [Gemini Pro 1.5](/google/gemini-pro-1.5), all at extremely economical token prices.", + "context_length": 1048576, + "architecture": { + "modality": "text+image-\u003Etext", + "input_modalities": ["text", "image", "file", "audio", "video"], + "output_modalities": ["text"], + "tokenizer": "Gemini", + "instruct_type": null + }, + "pricing": { + "prompt": "0.000000075", + "completion": "0.0000003", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 1048576, + "max_completion_tokens": 8192, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "max_tokens", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_p" + ], + "default_parameters": { + "temperature": null, + "top_p": null, + "frequency_penalty": null + } + }, + { + "id": "anthropic/claude-3.7-sonnet:thinking", + "canonical_slug": "anthropic/claude-3-7-sonnet-20250219", + "hugging_face_id": "", + "name": "Anthropic: Claude 3.7 Sonnet (thinking)", + "created": 1740422110, + "description": "Claude 3.7 Sonnet is an advanced large language model with improved reasoning, coding, and problem-solving capabilities. It introduces a hybrid reasoning approach, allowing users to choose between rapid responses and extended, step-by-step processing for complex tasks. The model demonstrates notable improvements in coding, particularly in front-end development and full-stack updates, and excels in agentic workflows, where it can autonomously navigate multi-step processes. 
\n\nClaude 3.7 Sonnet maintains performance parity with its predecessor in standard mode while offering an extended reasoning mode for enhanced accuracy in math, coding, and instruction-following tasks.\n\nRead more at the [blog post here](https://www.anthropic.com/news/claude-3-7-sonnet)", + "context_length": 200000, + "architecture": { + "modality": "text+image-\u003Etext", + "input_modalities": ["text", "image", "file"], + "output_modalities": ["text"], + "tokenizer": "Claude", + "instruct_type": null + }, + "pricing": { + "prompt": "0.000003", + "completion": "0.000015", + "request": "0", + "image": "0.0048", + "web_search": "0", + "internal_reasoning": "0", + "input_cache_read": "0.0000003", + "input_cache_write": "0.00000375" + }, + "top_provider": { + "context_length": 200000, + "max_completion_tokens": 64000, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "include_reasoning", + "max_tokens", + "reasoning", + "stop", + "temperature", + "tool_choice", + "tools", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "anthropic/claude-3.7-sonnet", + "canonical_slug": "anthropic/claude-3-7-sonnet-20250219", + "hugging_face_id": "", + "name": "Anthropic: Claude 3.7 Sonnet", + "created": 1740422110, + "description": "Claude 3.7 Sonnet is an advanced large language model with improved reasoning, coding, and problem-solving capabilities. It introduces a hybrid reasoning approach, allowing users to choose between rapid responses and extended, step-by-step processing for complex tasks. The model demonstrates notable improvements in coding, particularly in front-end development and full-stack updates, and excels in agentic workflows, where it can autonomously navigate multi-step processes. \n\nClaude 3.7 Sonnet maintains performance parity with its predecessor in standard mode while offering an extended reasoning mode for enhanced accuracy in math, coding, and instruction-following tasks.\n\nRead more at the [blog post here](https://www.anthropic.com/news/claude-3-7-sonnet)", + "context_length": 200000, + "architecture": { + "modality": "text+image-\u003Etext", + "input_modalities": ["text", "image", "file"], + "output_modalities": ["text"], + "tokenizer": "Claude", + "instruct_type": null + }, + "pricing": { + "prompt": "0.000003", + "completion": "0.000015", + "request": "0", + "image": "0.0048", + "web_search": "0", + "internal_reasoning": "0", + "input_cache_read": "0.0000003", + "input_cache_write": "0.00000375" + }, + "top_provider": { + "context_length": 200000, + "max_completion_tokens": 64000, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "include_reasoning", + "max_tokens", + "reasoning", + "stop", + "temperature", + "tool_choice", + "tools", + "top_k", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "mistralai/mistral-saba", + "canonical_slug": "mistralai/mistral-saba-2502", + "hugging_face_id": "", + "name": "Mistral: Saba", + "created": 1739803239, + "description": "Mistral Saba is a 24B-parameter language model specifically designed for the Middle East and South Asia, delivering accurate and contextually relevant responses while maintaining efficient performance. Trained on curated regional datasets, it supports multiple Indian-origin languages—including Tamil and Malayalam—alongside Arabic. This makes it a versatile option for a range of regional and multilingual applications. 
Read more at the blog post [here](https://mistral.ai/en/news/mistral-saba)", + "context_length": 32768, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Mistral", + "instruct_type": null + }, + "pricing": { + "prompt": "0.0000002", + "completion": "0.0000006", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 32768, + "max_completion_tokens": null, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "max_tokens", + "presence_penalty", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_p" + ], + "default_parameters": { + "temperature": 0.3 + } + }, + { + "id": "meta-llama/llama-guard-3-8b", + "canonical_slug": "meta-llama/llama-guard-3-8b", + "hugging_face_id": "meta-llama/Llama-Guard-3-8B", + "name": "Llama Guard 3 8B", + "created": 1739401318, + "description": "Llama Guard 3 is a Llama-3.1-8B pretrained model, fine-tuned for content safety classification. Similar to previous versions, it can be used to classify content in both LLM inputs (prompt classification) and in LLM responses (response classification). It acts as an LLM – it generates text in its output that indicates whether a given prompt or response is safe or unsafe, and if unsafe, it also lists the content categories violated.\n\nLlama Guard 3 was aligned to safeguard against the MLCommons standardized hazards taxonomy and designed to support Llama 3.1 capabilities. Specifically, it provides content moderation in 8 languages, and was optimized to support safety and security for search and code interpreter tool calls.\n", + "context_length": 131072, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Llama3", + "instruct_type": "none" + }, + "pricing": { + "prompt": "0.00000002", + "completion": "0.00000006", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 131072, + "max_completion_tokens": null, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "logit_bias", + "max_tokens", + "min_p", + "presence_penalty", + "repetition_penalty", + "response_format", + "seed", + "stop", + "temperature", + "top_k", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "openai/o3-mini-high", + "canonical_slug": "openai/o3-mini-high-2025-01-31", + "hugging_face_id": "", + "name": "OpenAI: o3 Mini High", + "created": 1739372611, + "description": "OpenAI o3-mini-high is the same model as [o3-mini](/openai/o3-mini) with reasoning_effort set to high. \n\no3-mini is a cost-efficient language model optimized for STEM reasoning tasks, particularly excelling in science, mathematics, and coding. The model features three adjustable reasoning effort levels and supports key developer capabilities including function calling, structured outputs, and streaming, though it does not include vision processing capabilities.\n\nThe model demonstrates significant improvements over its predecessor, with expert testers preferring its responses 56% of the time and noting a 39% reduction in major errors on complex questions. 
With medium reasoning effort settings, o3-mini matches the performance of the larger o1 model on challenging reasoning evaluations like AIME and GPQA, while maintaining lower latency and cost.", + "context_length": 200000, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text", "file"], + "output_modalities": ["text"], + "tokenizer": "GPT", + "instruct_type": null + }, + "pricing": { + "prompt": "0.0000011", + "completion": "0.0000044", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0", + "input_cache_read": "0.00000055" + }, + "top_provider": { + "context_length": 200000, + "max_completion_tokens": 100000, + "is_moderated": true + }, + "per_request_limits": null, + "supported_parameters": [ + "max_tokens", + "response_format", + "seed", + "structured_outputs", + "tool_choice", + "tools" + ], + "default_parameters": {} + }, + { + "id": "google/gemini-2.0-flash-001", + "canonical_slug": "google/gemini-2.0-flash-001", + "hugging_face_id": "", + "name": "Google: Gemini 2.0 Flash", + "created": 1738769413, + "description": "Gemini Flash 2.0 offers a significantly faster time to first token (TTFT) compared to [Gemini Flash 1.5](/google/gemini-flash-1.5), while maintaining quality on par with larger models like [Gemini Pro 1.5](/google/gemini-pro-1.5). It introduces notable enhancements in multimodal understanding, coding capabilities, complex instruction following, and function calling. These advancements come together to deliver more seamless and robust agentic experiences.", + "context_length": 1048576, + "architecture": { + "modality": "text+image-\u003Etext", + "input_modalities": ["text", "image", "file", "audio", "video"], + "output_modalities": ["text"], + "tokenizer": "Gemini", + "instruct_type": null + }, + "pricing": { + "prompt": "0.0000001", + "completion": "0.0000004", + "request": "0", + "image": "0.0000258", + "audio": "0.0000007", + "web_search": "0", + "internal_reasoning": "0", + "input_cache_read": "0.000000025", + "input_cache_write": "0.0000001833" + }, + "top_provider": { + "context_length": 1048576, + "max_completion_tokens": 8192, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "max_tokens", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_p" + ], + "default_parameters": { + "temperature": null, + "top_p": null, + "frequency_penalty": null + } + }, + { + "id": "qwen/qwen-vl-plus", + "canonical_slug": "qwen/qwen-vl-plus", + "hugging_face_id": "", + "name": "Qwen: Qwen VL Plus", + "created": 1738731255, + "description": "Qwen's Enhanced Large Visual Language Model. Significantly upgraded for detailed recognition capabilities and text recognition abilities, supporting ultra-high pixel resolutions up to millions of pixels and extreme aspect ratios for image input. 
It delivers significant performance across a broad range of visual tasks.\n", + "context_length": 7500, + "architecture": { + "modality": "text+image-\u003Etext", + "input_modalities": ["text", "image"], + "output_modalities": ["text"], + "tokenizer": "Qwen", + "instruct_type": null + }, + "pricing": { + "prompt": "0.00000021", + "completion": "0.00000063", + "request": "0", + "image": "0.0002688", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 7500, + "max_completion_tokens": 1500, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "max_tokens", + "presence_penalty", + "response_format", + "seed", + "temperature", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "aion-labs/aion-1.0", + "canonical_slug": "aion-labs/aion-1.0", + "hugging_face_id": "", + "name": "AionLabs: Aion-1.0", + "created": 1738697557, + "description": "Aion-1.0 is a multi-model system designed for high performance across various tasks, including reasoning and coding. It is built on DeepSeek-R1, augmented with additional models and techniques such as Tree of Thoughts (ToT) and Mixture of Experts (MoE). It is Aion Lab's most powerful reasoning model.", + "context_length": 131072, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Other", + "instruct_type": null + }, + "pricing": { + "prompt": "0.000004", + "completion": "0.000008", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 131072, + "max_completion_tokens": 32768, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "include_reasoning", + "max_tokens", + "reasoning", + "temperature", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "aion-labs/aion-1.0-mini", + "canonical_slug": "aion-labs/aion-1.0-mini", + "hugging_face_id": "FuseAI/FuseO1-DeepSeekR1-QwQ-SkyT1-32B-Preview", + "name": "AionLabs: Aion-1.0-Mini", + "created": 1738697107, + "description": "Aion-1.0-Mini 32B parameter model is a distilled version of the DeepSeek-R1 model, designed for strong performance in reasoning domains such as mathematics, coding, and logic. 
It is a modified variant of a FuseAI model that outperforms R1-Distill-Qwen-32B and R1-Distill-Llama-70B, with benchmark results available on its [Hugging Face page](https://huggingface.co/FuseAI/FuseO1-DeepSeekR1-QwQ-SkyT1-32B-Preview), independently replicated for verification.", + "context_length": 131072, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Other", + "instruct_type": null + }, + "pricing": { + "prompt": "0.0000007", + "completion": "0.0000014", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 131072, + "max_completion_tokens": 32768, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "include_reasoning", + "max_tokens", + "reasoning", + "temperature", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "aion-labs/aion-rp-llama-3.1-8b", + "canonical_slug": "aion-labs/aion-rp-llama-3.1-8b", + "hugging_face_id": "", + "name": "AionLabs: Aion-RP 1.0 (8B)", + "created": 1738696718, + "description": "Aion-RP-Llama-3.1-8B ranks the highest in the character evaluation portion of the RPBench-Auto benchmark, a roleplaying-specific variant of Arena-Hard-Auto, where LLMs evaluate each other’s responses. It is a fine-tuned base model rather than an instruct model, designed to produce more natural and varied writing.", + "context_length": 32768, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Other", + "instruct_type": null + }, + "pricing": { + "prompt": "0.0000002", + "completion": "0.0000002", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 32768, + "max_completion_tokens": 32768, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": ["max_tokens", "temperature", "top_p"], + "default_parameters": {} + }, + { + "id": "qwen/qwen-vl-max", + "canonical_slug": "qwen/qwen-vl-max-2025-01-25", + "hugging_face_id": "", + "name": "Qwen: Qwen VL Max", + "created": 1738434304, + "description": "Qwen VL Max is a visual understanding model with 7500 tokens context length. 
It excels in delivering optimal performance for a broader spectrum of complex tasks.\n", + "context_length": 131072, + "architecture": { + "modality": "text+image-\u003Etext", + "input_modalities": ["text", "image"], + "output_modalities": ["text"], + "tokenizer": "Qwen", + "instruct_type": null + }, + "pricing": { + "prompt": "0.0000008", + "completion": "0.0000032", + "request": "0", + "image": "0.001024", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 131072, + "max_completion_tokens": 8192, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "max_tokens", + "presence_penalty", + "response_format", + "seed", + "temperature", + "tool_choice", + "tools", + "top_p" + ], + "default_parameters": { + "temperature": null, + "top_p": null, + "frequency_penalty": null + } + }, + { + "id": "qwen/qwen-turbo", + "canonical_slug": "qwen/qwen-turbo-2024-11-01", + "hugging_face_id": "", + "name": "Qwen: Qwen-Turbo", + "created": 1738410974, + "description": "Qwen-Turbo, based on Qwen2.5, is a 1M context model that provides fast speed and low cost, suitable for simple tasks.", + "context_length": 1000000, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Qwen", + "instruct_type": null + }, + "pricing": { + "prompt": "0.00000005", + "completion": "0.0000002", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0", + "input_cache_read": "0.00000002" + }, + "top_provider": { + "context_length": 1000000, + "max_completion_tokens": 8192, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "max_tokens", + "presence_penalty", + "response_format", + "seed", + "temperature", + "tool_choice", + "tools", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "qwen/qwen2.5-vl-72b-instruct", + "canonical_slug": "qwen/qwen2.5-vl-72b-instruct", + "hugging_face_id": "Qwen/Qwen2.5-VL-72B-Instruct", + "name": "Qwen: Qwen2.5 VL 72B Instruct", + "created": 1738410311, + "description": "Qwen2.5-VL is proficient in recognizing common objects such as flowers, birds, fish, and insects. 
It is also highly capable of analyzing texts, charts, icons, graphics, and layouts within images.", + "context_length": 32768, + "architecture": { + "modality": "text+image-\u003Etext", + "input_modalities": ["text", "image"], + "output_modalities": ["text"], + "tokenizer": "Qwen", + "instruct_type": null + }, + "pricing": { + "prompt": "0.00000008", + "completion": "0.00000033", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 32768, + "max_completion_tokens": 32768, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "logit_bias", + "max_tokens", + "min_p", + "presence_penalty", + "repetition_penalty", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "top_k", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "qwen/qwen-plus", + "canonical_slug": "qwen/qwen-plus-2025-01-25", + "hugging_face_id": "", + "name": "Qwen: Qwen-Plus", + "created": 1738409840, + "description": "Qwen-Plus, based on the Qwen2.5 foundation model, is a 131K context model with a balanced performance, speed, and cost combination.", + "context_length": 131072, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Qwen", + "instruct_type": null + }, + "pricing": { + "prompt": "0.0000004", + "completion": "0.0000012", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0", + "input_cache_read": "0.00000016" + }, + "top_provider": { + "context_length": 131072, + "max_completion_tokens": 8192, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "max_tokens", + "presence_penalty", + "response_format", + "seed", + "temperature", + "tool_choice", + "tools", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "qwen/qwen-max", + "canonical_slug": "qwen/qwen-max-2025-01-25", + "hugging_face_id": "", + "name": "Qwen: Qwen-Max ", + "created": 1738402289, + "description": "Qwen-Max, based on Qwen2.5, provides the best inference performance among [Qwen models](/qwen), especially for complex multi-step tasks. It's a large-scale MoE model that has been pretrained on over 20 trillion tokens and further post-trained with curated Supervised Fine-Tuning (SFT) and Reinforcement Learning from Human Feedback (RLHF) methodologies. 
The parameter count is unknown.", + "context_length": 32768, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Qwen", + "instruct_type": null + }, + "pricing": { + "prompt": "0.0000016", + "completion": "0.0000064", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0", + "input_cache_read": "0.00000064" + }, + "top_provider": { + "context_length": 32768, + "max_completion_tokens": 8192, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "max_tokens", + "presence_penalty", + "response_format", + "seed", + "temperature", + "tool_choice", + "tools", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "openai/o3-mini", + "canonical_slug": "openai/o3-mini-2025-01-31", + "hugging_face_id": "", + "name": "OpenAI: o3 Mini", + "created": 1738351721, + "description": "OpenAI o3-mini is a cost-efficient language model optimized for STEM reasoning tasks, particularly excelling in science, mathematics, and coding.\n\nThis model supports the `reasoning_effort` parameter, which can be set to \"high\", \"medium\", or \"low\" to control the thinking time of the model. The default is \"medium\". OpenRouter also offers the model slug `openai/o3-mini-high` to default the parameter to \"high\".\n\nThe model features three adjustable reasoning effort levels and supports key developer capabilities including function calling, structured outputs, and streaming, though it does not include vision processing capabilities.\n\nThe model demonstrates significant improvements over its predecessor, with expert testers preferring its responses 56% of the time and noting a 39% reduction in major errors on complex questions. With medium reasoning effort settings, o3-mini matches the performance of the larger o1 model on challenging reasoning evaluations like AIME and GPQA, while maintaining lower latency and cost.", + "context_length": 200000, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text", "file"], + "output_modalities": ["text"], + "tokenizer": "GPT", + "instruct_type": null + }, + "pricing": { + "prompt": "0.0000011", + "completion": "0.0000044", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0", + "input_cache_read": "0.00000055" + }, + "top_provider": { + "context_length": 200000, + "max_completion_tokens": 100000, + "is_moderated": true + }, + "per_request_limits": null, + "supported_parameters": [ + "max_tokens", + "response_format", + "seed", + "structured_outputs", + "tool_choice", + "tools" + ], + "default_parameters": {} + }, + { + "id": "mistralai/mistral-small-24b-instruct-2501:free", + "canonical_slug": "mistralai/mistral-small-24b-instruct-2501", + "hugging_face_id": "mistralai/Mistral-Small-24B-Instruct-2501", + "name": "Mistral: Mistral Small 3 (free)", + "created": 1738255409, + "description": "Mistral Small 3 is a 24B-parameter language model optimized for low-latency performance across common AI tasks. Released under the Apache 2.0 license, it features both pre-trained and instruction-tuned versions designed for efficient local deployment.\n\nThe model achieves 81% accuracy on the MMLU benchmark and performs competitively with larger models like Llama 3.3 70B and Qwen 32B, while operating at three times the speed on equivalent hardware. 
[Read the blog post about the model here.](https://mistral.ai/news/mistral-small-3/)", + "context_length": 32768, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Mistral", + "instruct_type": null + }, + "pricing": { + "prompt": "0", + "completion": "0", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 32768, + "max_completion_tokens": null, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "max_tokens", + "presence_penalty", + "repetition_penalty", + "seed", + "stop", + "temperature", + "top_k", + "top_p" + ], + "default_parameters": { + "temperature": 0.3 + } + }, + { + "id": "mistralai/mistral-small-24b-instruct-2501", + "canonical_slug": "mistralai/mistral-small-24b-instruct-2501", + "hugging_face_id": "mistralai/Mistral-Small-24B-Instruct-2501", + "name": "Mistral: Mistral Small 3", + "created": 1738255409, + "description": "Mistral Small 3 is a 24B-parameter language model optimized for low-latency performance across common AI tasks. Released under the Apache 2.0 license, it features both pre-trained and instruction-tuned versions designed for efficient local deployment.\n\nThe model achieves 81% accuracy on the MMLU benchmark and performs competitively with larger models like Llama 3.3 70B and Qwen 32B, while operating at three times the speed on equivalent hardware. [Read the blog post about the model here.](https://mistral.ai/news/mistral-small-3/)", + "context_length": 32768, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Mistral", + "instruct_type": null + }, + "pricing": { + "prompt": "0.00000005", + "completion": "0.00000008", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 32768, + "max_completion_tokens": 16384, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "logit_bias", + "max_tokens", + "min_p", + "presence_penalty", + "repetition_penalty", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_k", + "top_p" + ], + "default_parameters": { + "temperature": 0.3 + } + }, + { + "id": "deepseek/deepseek-r1-distill-qwen-32b", + "canonical_slug": "deepseek/deepseek-r1-distill-qwen-32b", + "hugging_face_id": "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B", + "name": "DeepSeek: R1 Distill Qwen 32B", + "created": 1738194830, + "description": "DeepSeek R1 Distill Qwen 32B is a distilled large language model based on [Qwen 2.5 32B](https://huggingface.co/Qwen/Qwen2.5-32B), using outputs from [DeepSeek R1](/deepseek/deepseek-r1). 
It outperforms OpenAI's o1-mini across various benchmarks, achieving new state-of-the-art results for dense models.\n\nOther benchmark results include:\n\n- AIME 2024 pass@1: 72.6\n- MATH-500 pass@1: 94.3\n- CodeForces Rating: 1691\n\nThe model leverages fine-tuning from DeepSeek R1's outputs, enabling competitive performance comparable to larger frontier models.", + "context_length": 131072, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Qwen", + "instruct_type": "deepseek-r1" + }, + "pricing": { + "prompt": "0.00000027", + "completion": "0.00000027", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 131072, + "max_completion_tokens": null, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "include_reasoning", + "max_tokens", + "min_p", + "presence_penalty", + "reasoning", + "repetition_penalty", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "top_k", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "deepseek/deepseek-r1-distill-qwen-14b", + "canonical_slug": "deepseek/deepseek-r1-distill-qwen-14b", + "hugging_face_id": "deepseek-ai/DeepSeek-R1-Distill-Qwen-14B", + "name": "DeepSeek: R1 Distill Qwen 14B", + "created": 1738193940, + "description": "DeepSeek R1 Distill Qwen 14B is a distilled large language model based on [Qwen 2.5 14B](https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Qwen-14B), using outputs from [DeepSeek R1](/deepseek/deepseek-r1). It outperforms OpenAI's o1-mini across various benchmarks, achieving new state-of-the-art results for dense models.\n\nOther benchmark results include:\n\n- AIME 2024 pass@1: 69.7\n- MATH-500 pass@1: 93.9\n- CodeForces Rating: 1481\n\nThe model leverages fine-tuning from DeepSeek R1's outputs, enabling competitive performance comparable to larger frontier models.", + "context_length": 32768, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Qwen", + "instruct_type": "deepseek-r1" + }, + "pricing": { + "prompt": "0.00000015", + "completion": "0.00000015", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 32768, + "max_completion_tokens": 16384, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "include_reasoning", + "logit_bias", + "max_tokens", + "min_p", + "presence_penalty", + "reasoning", + "repetition_penalty", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "top_k", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "perplexity/sonar-reasoning", + "canonical_slug": "perplexity/sonar-reasoning", + "hugging_face_id": "", + "name": "Perplexity: Sonar Reasoning", + "created": 1738131107, + "description": "Sonar Reasoning is a reasoning model provided by Perplexity based on [DeepSeek R1](/deepseek/deepseek-r1).\n\nIt allows developers to utilize long chain of thought with built-in web search. Sonar Reasoning is uncensored and hosted in US datacenters. 
", + "context_length": 127000, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Other", + "instruct_type": "deepseek-r1" + }, + "pricing": { + "prompt": "0.000001", + "completion": "0.000005", + "request": "0.005", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 127000, + "max_completion_tokens": null, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "include_reasoning", + "max_tokens", + "presence_penalty", + "reasoning", + "temperature", + "top_k", + "top_p", + "web_search_options" + ], + "default_parameters": {} + }, + { + "id": "perplexity/sonar", + "canonical_slug": "perplexity/sonar", + "hugging_face_id": "", + "name": "Perplexity: Sonar", + "created": 1738013808, + "description": "Sonar is lightweight, affordable, fast, and simple to use — now featuring citations and the ability to customize sources. It is designed for companies seeking to integrate lightweight question-and-answer features optimized for speed.", + "context_length": 127072, + "architecture": { + "modality": "text+image-\u003Etext", + "input_modalities": ["text", "image"], + "output_modalities": ["text"], + "tokenizer": "Other", + "instruct_type": null + }, + "pricing": { + "prompt": "0.000001", + "completion": "0.000001", + "request": "0.005", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 127072, + "max_completion_tokens": null, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "max_tokens", + "presence_penalty", + "temperature", + "top_k", + "top_p", + "web_search_options" + ], + "default_parameters": {} + }, + { + "id": "deepseek/deepseek-r1-distill-llama-70b:free", + "canonical_slug": "deepseek/deepseek-r1-distill-llama-70b", + "hugging_face_id": "deepseek-ai/DeepSeek-R1-Distill-Llama-70B", + "name": "DeepSeek: R1 Distill Llama 70B (free)", + "created": 1737663169, + "description": "DeepSeek R1 Distill Llama 70B is a distilled large language model based on [Llama-3.3-70B-Instruct](/meta-llama/llama-3.3-70b-instruct), using outputs from [DeepSeek R1](/deepseek/deepseek-r1). 
The model combines advanced distillation techniques to achieve high performance across multiple benchmarks, including:\n\n- AIME 2024 pass@1: 70.0\n- MATH-500 pass@1: 94.5\n- CodeForces Rating: 1633\n\nThe model leverages fine-tuning from DeepSeek R1's outputs, enabling competitive performance comparable to larger frontier models.", + "context_length": 8192, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Llama3", + "instruct_type": "deepseek-r1" + }, + "pricing": { + "prompt": "0", + "completion": "0", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 8192, + "max_completion_tokens": 4096, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "include_reasoning", + "logit_bias", + "max_tokens", + "min_p", + "presence_penalty", + "reasoning", + "repetition_penalty", + "seed", + "stop", + "temperature", + "top_k", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "deepseek/deepseek-r1-distill-llama-70b", + "canonical_slug": "deepseek/deepseek-r1-distill-llama-70b", + "hugging_face_id": "deepseek-ai/DeepSeek-R1-Distill-Llama-70B", + "name": "DeepSeek: R1 Distill Llama 70B", + "created": 1737663169, + "description": "DeepSeek R1 Distill Llama 70B is a distilled large language model based on [Llama-3.3-70B-Instruct](/meta-llama/llama-3.3-70b-instruct), using outputs from [DeepSeek R1](/deepseek/deepseek-r1). The model combines advanced distillation techniques to achieve high performance across multiple benchmarks, including:\n\n- AIME 2024 pass@1: 70.0\n- MATH-500 pass@1: 94.5\n- CodeForces Rating: 1633\n\nThe model leverages fine-tuning from DeepSeek R1's outputs, enabling competitive performance comparable to larger frontier models.", + "context_length": 131072, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Llama3", + "instruct_type": "deepseek-r1" + }, + "pricing": { + "prompt": "0.00000003", + "completion": "0.00000013", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 131072, + "max_completion_tokens": 131072, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "include_reasoning", + "logit_bias", + "max_tokens", + "min_p", + "presence_penalty", + "reasoning", + "repetition_penalty", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_k", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "deepseek/deepseek-r1:free", + "canonical_slug": "deepseek/deepseek-r1", + "hugging_face_id": "deepseek-ai/DeepSeek-R1", + "name": "DeepSeek: R1 (free)", + "created": 1737381095, + "description": "DeepSeek R1 is here: Performance on par with [OpenAI o1](/openai/o1), but open-sourced and with fully open reasoning tokens. 
It's 671B parameters in size, with 37B active in an inference pass.\n\nFully open-source model & [technical report](https://api-docs.deepseek.com/news/news250120).\n\nMIT licensed: Distill & commercialize freely!", + "context_length": 163840, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "DeepSeek", + "instruct_type": "deepseek-r1" + }, + "pricing": { + "prompt": "0", + "completion": "0", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 163840, + "max_completion_tokens": null, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "include_reasoning", + "max_tokens", + "presence_penalty", + "reasoning", + "repetition_penalty", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "top_k", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "deepseek/deepseek-r1", + "canonical_slug": "deepseek/deepseek-r1", + "hugging_face_id": "deepseek-ai/DeepSeek-R1", + "name": "DeepSeek: R1", + "created": 1737381095, + "description": "DeepSeek R1 is here: Performance on par with [OpenAI o1](/openai/o1), but open-sourced and with fully open reasoning tokens. It's 671B parameters in size, with 37B active in an inference pass.\n\nFully open-source model & [technical report](https://api-docs.deepseek.com/news/news250120).\n\nMIT licensed: Distill & commercialize freely!", + "context_length": 163840, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "DeepSeek", + "instruct_type": "deepseek-r1" + }, + "pricing": { + "prompt": "0.0000003", + "completion": "0.0000012", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 163840, + "max_completion_tokens": null, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "include_reasoning", + "max_tokens", + "min_p", + "presence_penalty", + "reasoning", + "repetition_penalty", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_k", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "minimax/minimax-01", + "canonical_slug": "minimax/minimax-01", + "hugging_face_id": "MiniMaxAI/MiniMax-Text-01", + "name": "MiniMax: MiniMax-01", + "created": 1736915462, + "description": "MiniMax-01 combines MiniMax-Text-01 for text generation and MiniMax-VL-01 for image understanding. It has 456 billion parameters, with 45.9 billion parameters activated per inference, and can handle a context of up to 4 million tokens.\n\nThe text model adopts a hybrid architecture that combines Lightning Attention, Softmax Attention, and Mixture-of-Experts (MoE). 
The image model adopts the “ViT-MLP-LLM” framework and is trained on top of the text model.\n\nTo read more about the release, see: https://www.minimaxi.com/en/news/minimax-01-series-2", + "context_length": 1000192, + "architecture": { + "modality": "text+image-\u003Etext", + "input_modalities": ["text", "image"], + "output_modalities": ["text"], + "tokenizer": "Other", + "instruct_type": null + }, + "pricing": { + "prompt": "0.0000002", + "completion": "0.0000011", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 1000192, + "max_completion_tokens": 1000192, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": ["max_tokens", "temperature", "top_p"], + "default_parameters": {} + }, + { + "id": "mistralai/codestral-2501", + "canonical_slug": "mistralai/codestral-2501", + "hugging_face_id": "", + "name": "Mistral: Codestral 2501", + "created": 1736895522, + "description": "[Mistral](/mistralai)'s cutting-edge language model for coding. Codestral specializes in low-latency, high-frequency tasks such as fill-in-the-middle (FIM), code correction and test generation. \n\nLearn more on their blog post: https://mistral.ai/news/codestral-2501/", + "context_length": 256000, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Mistral", + "instruct_type": null + }, + "pricing": { + "prompt": "0.0000003", + "completion": "0.0000009", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 256000, + "max_completion_tokens": null, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "max_tokens", + "presence_penalty", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_p" + ], + "default_parameters": { + "temperature": 0.3 + } + }, + { + "id": "microsoft/phi-4", + "canonical_slug": "microsoft/phi-4", + "hugging_face_id": "microsoft/phi-4", + "name": "Microsoft: Phi 4", + "created": 1736489872, + "description": "[Microsoft Research](/microsoft) Phi-4 is designed to perform well in complex reasoning tasks and can operate efficiently in situations with limited memory or where quick responses are needed. \n\nAt 14 billion parameters, it was trained on a mix of high-quality synthetic datasets, data from curated websites, and academic materials. It has undergone careful improvement to follow instructions accurately and maintain strong safety standards. 
It works best with English language inputs.\n\nFor more information, please see [Phi-4 Technical Report](https://arxiv.org/pdf/2412.08905)\n", + "context_length": 16384, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Other", + "instruct_type": null + }, + "pricing": { + "prompt": "0.00000006", + "completion": "0.00000014", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 16384, + "max_completion_tokens": null, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "max_tokens", + "min_p", + "presence_penalty", + "repetition_penalty", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "top_k", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "sao10k/l3.1-70b-hanami-x1", + "canonical_slug": "sao10k/l3.1-70b-hanami-x1", + "hugging_face_id": "Sao10K/L3.1-70B-Hanami-x1", + "name": "Sao10K: Llama 3.1 70B Hanami x1", + "created": 1736302854, + "description": "This is [Sao10K](/sao10k)'s experiment over [Euryale v2.2](/sao10k/l3.1-euryale-70b).", + "context_length": 16000, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Llama3", + "instruct_type": null + }, + "pricing": { + "prompt": "0.000003", + "completion": "0.000003", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 16000, + "max_completion_tokens": null, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "logit_bias", + "max_tokens", + "min_p", + "presence_penalty", + "repetition_penalty", + "seed", + "stop", + "temperature", + "top_k", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "deepseek/deepseek-chat", + "canonical_slug": "deepseek/deepseek-chat-v3", + "hugging_face_id": "deepseek-ai/DeepSeek-V3", + "name": "DeepSeek: DeepSeek V3", + "created": 1735241320, + "description": "DeepSeek-V3 is the latest model from the DeepSeek team, building upon the instruction following and coding abilities of the previous versions. 
Pre-trained on nearly 15 trillion tokens, the model outperforms other open-source models and rivals leading closed-source models, according to reported evaluations.\n\nFor model details, please visit [the DeepSeek-V3 repo](https://github.com/deepseek-ai/DeepSeek-V3), or see the [launch announcement](https://api-docs.deepseek.com/news/news1226).", + "context_length": 163840, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "DeepSeek", + "instruct_type": null + }, + "pricing": { + "prompt": "0.0000003", + "completion": "0.0000012", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 163840, + "max_completion_tokens": 163840, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "max_tokens", + "min_p", + "presence_penalty", + "repetition_penalty", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_k", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "sao10k/l3.3-euryale-70b", + "canonical_slug": "sao10k/l3.3-euryale-70b-v2.3", + "hugging_face_id": "Sao10K/L3.3-70B-Euryale-v2.3", + "name": "Sao10K: Llama 3.3 Euryale 70B", + "created": 1734535928, + "description": "Euryale L3.3 70B is a model focused on creative roleplay from [Sao10k](https://ko-fi.com/sao10k). It is the successor of [Euryale L3 70B v2.2](/models/sao10k/l3-euryale-70b).", + "context_length": 131072, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Llama3", + "instruct_type": "llama3" + }, + "pricing": { + "prompt": "0.00000065", + "completion": "0.00000075", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 131072, + "max_completion_tokens": 16384, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "max_tokens", + "min_p", + "presence_penalty", + "repetition_penalty", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "top_k", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "openai/o1", + "canonical_slug": "openai/o1-2024-12-17", + "hugging_face_id": "", + "name": "OpenAI: o1", + "created": 1734459999, + "description": "The latest and strongest model family from OpenAI, o1 is designed to spend more time thinking before responding. The o1 model series is trained with large-scale reinforcement learning to reason using chain of thought. \n\nThe o1 models are optimized for math, science, programming, and other STEM-related tasks. They consistently exhibit PhD-level accuracy on benchmarks in physics, chemistry, and biology. 
Learn more in the [launch announcement](https://openai.com/o1).\n", + "context_length": 200000, + "architecture": { + "modality": "text+image-\u003Etext", + "input_modalities": ["text", "image", "file"], + "output_modalities": ["text"], + "tokenizer": "GPT", + "instruct_type": null + }, + "pricing": { + "prompt": "0.000015", + "completion": "0.00006", + "request": "0", + "image": "0.021675", + "web_search": "0", + "internal_reasoning": "0", + "input_cache_read": "0.0000075" + }, + "top_provider": { + "context_length": 200000, + "max_completion_tokens": 100000, + "is_moderated": true + }, + "per_request_limits": null, + "supported_parameters": [ + "max_tokens", + "response_format", + "seed", + "structured_outputs", + "tool_choice", + "tools" + ], + "default_parameters": {} + }, + { + "id": "cohere/command-r7b-12-2024", + "canonical_slug": "cohere/command-r7b-12-2024", + "hugging_face_id": "", + "name": "Cohere: Command R7B (12-2024)", + "created": 1734158152, + "description": "Command R7B (12-2024) is a small, fast update of the Command R+ model, delivered in December 2024. It excels at RAG, tool use, agents, and similar tasks requiring complex reasoning and multiple steps.\n\nUse of this model is subject to Cohere's [Usage Policy](https://docs.cohere.com/docs/usage-policy) and [SaaS Agreement](https://cohere.com/saas-agreement).", + "context_length": 128000, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Cohere", + "instruct_type": null + }, + "pricing": { + "prompt": "0.0000000375", + "completion": "0.00000015", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 128000, + "max_completion_tokens": 4000, + "is_moderated": true + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "max_tokens", + "presence_penalty", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "top_k", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "google/gemini-2.0-flash-exp:free", + "canonical_slug": "google/gemini-2.0-flash-exp", + "hugging_face_id": "", + "name": "Google: Gemini 2.0 Flash Experimental (free)", + "created": 1733937523, + "description": "Gemini Flash 2.0 offers a significantly faster time to first token (TTFT) compared to [Gemini Flash 1.5](/google/gemini-flash-1.5), while maintaining quality on par with larger models like [Gemini Pro 1.5](/google/gemini-pro-1.5). It introduces notable enhancements in multimodal understanding, coding capabilities, complex instruction following, and function calling. 
These advancements come together to deliver more seamless and robust agentic experiences.", + "context_length": 1048576, + "architecture": { + "modality": "text+image-\u003Etext", + "input_modalities": ["text", "image"], + "output_modalities": ["text"], + "tokenizer": "Gemini", + "instruct_type": null + }, + "pricing": { + "prompt": "0", + "completion": "0", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 1048576, + "max_completion_tokens": 8192, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "max_tokens", + "response_format", + "seed", + "stop", + "temperature", + "tool_choice", + "tools", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "meta-llama/llama-3.3-70b-instruct:free", + "canonical_slug": "meta-llama/llama-3.3-70b-instruct", + "hugging_face_id": "meta-llama/Llama-3.3-70B-Instruct", + "name": "Meta: Llama 3.3 70B Instruct (free)", + "created": 1733506137, + "description": "The Meta Llama 3.3 multilingual large language model (LLM) is a pretrained and instruction tuned generative model in 70B (text in/text out). The Llama 3.3 instruction tuned text only model is optimized for multilingual dialogue use cases and outperforms many of the available open source and closed chat models on common industry benchmarks.\n\nSupported languages: English, German, French, Italian, Portuguese, Hindi, Spanish, and Thai.\n\n[Model Card](https://github.com/meta-llama/llama-models/blob/main/models/llama3_3/MODEL_CARD.md)", + "context_length": 131072, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Llama3", + "instruct_type": "llama3" + }, + "pricing": { + "prompt": "0", + "completion": "0", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 131072, + "max_completion_tokens": null, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "logit_bias", + "max_tokens", + "min_p", + "presence_penalty", + "repetition_penalty", + "stop", + "temperature", + "tool_choice", + "tools", + "top_k", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "meta-llama/llama-3.3-70b-instruct", + "canonical_slug": "meta-llama/llama-3.3-70b-instruct", + "hugging_face_id": "meta-llama/Llama-3.3-70B-Instruct", + "name": "Meta: Llama 3.3 70B Instruct", + "created": 1733506137, + "description": "The Meta Llama 3.3 multilingual large language model (LLM) is a pretrained and instruction tuned generative model in 70B (text in/text out). 
The Llama 3.3 instruction tuned text only model is optimized for multilingual dialogue use cases and outperforms many of the available open source and closed chat models on common industry benchmarks.\n\nSupported languages: English, German, French, Italian, Portuguese, Hindi, Spanish, and Thai.\n\n[Model Card](https://github.com/meta-llama/llama-models/blob/main/models/llama3_3/MODEL_CARD.md)", +      "context_length": 131072, +      "architecture": { +        "modality": "text-\u003Etext", +        "input_modalities": ["text"], +        "output_modalities": ["text"], +        "tokenizer": "Llama3", +        "instruct_type": "llama3" +      }, +      "pricing": { +        "prompt": "0.00000013", +        "completion": "0.00000038", +        "request": "0", +        "image": "0", +        "web_search": "0", +        "internal_reasoning": "0" +      }, +      "top_provider": { +        "context_length": 131072, +        "max_completion_tokens": 16384, +        "is_moderated": false +      }, +      "per_request_limits": null, +      "supported_parameters": [ +        "frequency_penalty", +        "logit_bias", +        "logprobs", +        "max_tokens", +        "min_p", +        "presence_penalty", +        "repetition_penalty", +        "response_format", +        "seed", +        "stop", +        "structured_outputs", +        "temperature", +        "tool_choice", +        "tools", +        "top_k", +        "top_logprobs", +        "top_p" +      ], +      "default_parameters": {} +    }, +    { +      "id": "amazon/nova-lite-v1", +      "canonical_slug": "amazon/nova-lite-v1", +      "hugging_face_id": "", +      "name": "Amazon: Nova Lite 1.0", +      "created": 1733437363, +      "description": "Amazon Nova Lite 1.0 is a very low-cost multimodal model from Amazon that focuses on fast processing of image, video, and text inputs to generate text output. Amazon Nova Lite can handle real-time customer interactions, document analysis, and visual question-answering tasks with high accuracy.\n\nWith an input context of 300K tokens, it can analyze multiple images or up to 30 minutes of video in a single input.", +      "context_length": 300000, +      "architecture": { +        "modality": "text+image-\u003Etext", +        "input_modalities": ["text", "image"], +        "output_modalities": ["text"], +        "tokenizer": "Nova", +        "instruct_type": null +      }, +      "pricing": { +        "prompt": "0.00000006", +        "completion": "0.00000024", +        "request": "0", +        "image": "0.00009", +        "web_search": "0", +        "internal_reasoning": "0" +      }, +      "top_provider": { +        "context_length": 300000, +        "max_completion_tokens": 5120, +        "is_moderated": true +      }, +      "per_request_limits": null, +      "supported_parameters": [ +        "max_tokens", +        "stop", +        "temperature", +        "tools", +        "top_k", +        "top_p" +      ], +      "default_parameters": {} +    }, +    { +      "id": "amazon/nova-micro-v1", +      "canonical_slug": "amazon/nova-micro-v1", +      "hugging_face_id": "", +      "name": "Amazon: Nova Micro 1.0", +      "created": 1733437237, +      "description": "Amazon Nova Micro 1.0 is a text-only model that delivers the lowest latency responses in the Amazon Nova family of models at a very low cost. With a context length of 128K tokens and optimized for speed and cost, Amazon Nova Micro excels at tasks such as text summarization, translation, content classification, interactive chat, and brainstorming.
It has simple mathematical reasoning and coding abilities.", + "context_length": 128000, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Nova", + "instruct_type": null + }, + "pricing": { + "prompt": "0.000000035", + "completion": "0.00000014", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 128000, + "max_completion_tokens": 5120, + "is_moderated": true + }, + "per_request_limits": null, + "supported_parameters": [ + "max_tokens", + "stop", + "temperature", + "tools", + "top_k", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "amazon/nova-pro-v1", + "canonical_slug": "amazon/nova-pro-v1", + "hugging_face_id": "", + "name": "Amazon: Nova Pro 1.0", + "created": 1733436303, + "description": "Amazon Nova Pro 1.0 is a capable multimodal model from Amazon focused on providing a combination of accuracy, speed, and cost for a wide range of tasks. As of December 2024, it achieves state-of-the-art performance on key benchmarks including visual question answering (TextVQA) and video understanding (VATEX).\n\nAmazon Nova Pro demonstrates strong capabilities in processing both visual and textual information and at analyzing financial documents.\n\n**NOTE**: Video input is not supported at this time.", + "context_length": 300000, + "architecture": { + "modality": "text+image-\u003Etext", + "input_modalities": ["text", "image"], + "output_modalities": ["text"], + "tokenizer": "Nova", + "instruct_type": null + }, + "pricing": { + "prompt": "0.0000008", + "completion": "0.0000032", + "request": "0", + "image": "0.0012", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 300000, + "max_completion_tokens": 5120, + "is_moderated": true + }, + "per_request_limits": null, + "supported_parameters": [ + "max_tokens", + "stop", + "temperature", + "tools", + "top_k", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "openai/gpt-4o-2024-11-20", + "canonical_slug": "openai/gpt-4o-2024-11-20", + "hugging_face_id": "", + "name": "OpenAI: GPT-4o (2024-11-20)", + "created": 1732127594, + "description": "The 2024-11-20 version of GPT-4o offers a leveled-up creative writing ability with more natural, engaging, and tailored writing to improve relevance & readability. It’s also better at working with uploaded files, providing deeper insights & more thorough responses.\n\nGPT-4o (\"o\" for \"omni\") is OpenAI's latest AI model, supporting both text and image inputs with text outputs. It maintains the intelligence level of [GPT-4 Turbo](/models/openai/gpt-4-turbo) while being twice as fast and 50% more cost-effective. 
GPT-4o also offers improved performance in processing non-English languages and enhanced visual capabilities.", + "context_length": 128000, + "architecture": { + "modality": "text+image-\u003Etext", + "input_modalities": ["text", "image", "file"], + "output_modalities": ["text"], + "tokenizer": "GPT", + "instruct_type": null + }, + "pricing": { + "prompt": "0.0000025", + "completion": "0.00001", + "request": "0", + "image": "0.003613", + "web_search": "0", + "internal_reasoning": "0", + "input_cache_read": "0.00000125" + }, + "top_provider": { + "context_length": 128000, + "max_completion_tokens": 16384, + "is_moderated": true + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "logit_bias", + "logprobs", + "max_tokens", + "presence_penalty", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_logprobs", + "top_p", + "web_search_options" + ], + "default_parameters": {} + }, + { + "id": "mistralai/mistral-large-2411", + "canonical_slug": "mistralai/mistral-large-2411", + "hugging_face_id": "", + "name": "Mistral Large 2411", + "created": 1731978685, + "description": "Mistral Large 2 2411 is an update of [Mistral Large 2](/mistralai/mistral-large) released together with [Pixtral Large 2411](/mistralai/pixtral-large-2411)\n\nIt provides a significant upgrade on the previous [Mistral Large 24.07](/mistralai/mistral-large-2407), with notable improvements in long context understanding, a new system prompt, and more accurate function calling.", + "context_length": 131072, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Mistral", + "instruct_type": null + }, + "pricing": { + "prompt": "0.000002", + "completion": "0.000006", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 131072, + "max_completion_tokens": null, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "max_tokens", + "presence_penalty", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_p" + ], + "default_parameters": { + "temperature": 0.3 + } + }, + { + "id": "mistralai/mistral-large-2407", + "canonical_slug": "mistralai/mistral-large-2407", + "hugging_face_id": "", + "name": "Mistral Large 2407", + "created": 1731978415, + "description": "This is Mistral AI's flagship model, Mistral Large 2 (version mistral-large-2407). It's a proprietary weights-available model and excels at reasoning, code, JSON, chat, and more. Read the launch announcement [here](https://mistral.ai/news/mistral-large-2407/).\n\nIt supports dozens of languages including French, German, Spanish, Italian, Portuguese, Arabic, Hindi, Russian, Chinese, Japanese, and Korean, along with 80+ coding languages including Python, Java, C, C++, JavaScript, and Bash. 
Its long context window allows precise information recall from large documents.\n", +      "context_length": 131072, +      "architecture": { +        "modality": "text-\u003Etext", +        "input_modalities": ["text"], +        "output_modalities": ["text"], +        "tokenizer": "Mistral", +        "instruct_type": null +      }, +      "pricing": { +        "prompt": "0.000002", +        "completion": "0.000006", +        "request": "0", +        "image": "0", +        "web_search": "0", +        "internal_reasoning": "0" +      }, +      "top_provider": { +        "context_length": 131072, +        "max_completion_tokens": null, +        "is_moderated": false +      }, +      "per_request_limits": null, +      "supported_parameters": [ +        "frequency_penalty", +        "max_tokens", +        "presence_penalty", +        "response_format", +        "seed", +        "stop", +        "structured_outputs", +        "temperature", +        "tool_choice", +        "tools", +        "top_p" +      ], +      "default_parameters": { +        "temperature": 0.3 +      } +    }, +    { +      "id": "mistralai/pixtral-large-2411", +      "canonical_slug": "mistralai/pixtral-large-2411", +      "hugging_face_id": "", +      "name": "Mistral: Pixtral Large 2411", +      "created": 1731977388, +      "description": "Pixtral Large is a 124B parameter, open-weight, multimodal model built on top of [Mistral Large 2](/mistralai/mistral-large-2411). The model is able to understand documents, charts and natural images.\n\nThe model is available under the Mistral Research License (MRL) for research and educational use, and the Mistral Commercial License for experimentation, testing, and production for commercial purposes.\n\n", +      "context_length": 131072, +      "architecture": { +        "modality": "text+image-\u003Etext", +        "input_modalities": ["text", "image"], +        "output_modalities": ["text"], +        "tokenizer": "Mistral", +        "instruct_type": null +      }, +      "pricing": { +        "prompt": "0.000002", +        "completion": "0.000006", +        "request": "0", +        "image": "0.002888", +        "web_search": "0", +        "internal_reasoning": "0" +      }, +      "top_provider": { +        "context_length": 131072, +        "max_completion_tokens": null, +        "is_moderated": false +      }, +      "per_request_limits": null, +      "supported_parameters": [ +        "frequency_penalty", +        "max_tokens", +        "presence_penalty", +        "response_format", +        "seed", +        "stop", +        "structured_outputs", +        "temperature", +        "tool_choice", +        "tools", +        "top_p" +      ], +      "default_parameters": { +        "temperature": 0.3 +      } +    }, +    { +      "id": "qwen/qwen-2.5-coder-32b-instruct:free", +      "canonical_slug": "qwen/qwen-2.5-coder-32b-instruct", +      "hugging_face_id": "Qwen/Qwen2.5-Coder-32B-Instruct", +      "name": "Qwen2.5 Coder 32B Instruct (free)", +      "created": 1731368400, +      "description": "Qwen2.5-Coder is the latest series of Code-Specific Qwen large language models (formerly known as CodeQwen). Qwen2.5-Coder brings the following improvements upon CodeQwen1.5:\n\n- Significant improvements in **code generation**, **code reasoning** and **code fixing**. \n- A more comprehensive foundation for real-world applications such as **Code Agents**.
Not only enhancing coding capabilities but also maintaining its strengths in mathematics and general competencies.\n\nTo read more about its evaluation results, check out [Qwen 2.5 Coder's blog](https://qwenlm.github.io/blog/qwen2.5-coder-family/).", +      "context_length": 32768, +      "architecture": { +        "modality": "text-\u003Etext", +        "input_modalities": ["text"], +        "output_modalities": ["text"], +        "tokenizer": "Qwen", +        "instruct_type": "chatml" +      }, +      "pricing": { +        "prompt": "0", +        "completion": "0", +        "request": "0", +        "image": "0", +        "web_search": "0", +        "internal_reasoning": "0" +      }, +      "top_provider": { +        "context_length": 32768, +        "max_completion_tokens": null, +        "is_moderated": false +      }, +      "per_request_limits": null, +      "supported_parameters": [ +        "frequency_penalty", +        "max_tokens", +        "presence_penalty", +        "repetition_penalty", +        "seed", +        "stop", +        "temperature", +        "top_k", +        "top_p" +      ], +      "default_parameters": {} +    }, +    { +      "id": "qwen/qwen-2.5-coder-32b-instruct", +      "canonical_slug": "qwen/qwen-2.5-coder-32b-instruct", +      "hugging_face_id": "Qwen/Qwen2.5-Coder-32B-Instruct", +      "name": "Qwen2.5 Coder 32B Instruct", +      "created": 1731368400, +      "description": "Qwen2.5-Coder is the latest series of Code-Specific Qwen large language models (formerly known as CodeQwen). Qwen2.5-Coder brings the following improvements upon CodeQwen1.5:\n\n- Significant improvements in **code generation**, **code reasoning** and **code fixing**. \n- A more comprehensive foundation for real-world applications such as **Code Agents**. Not only enhancing coding capabilities but also maintaining its strengths in mathematics and general competencies.\n\nTo read more about its evaluation results, check out [Qwen 2.5 Coder's blog](https://qwenlm.github.io/blog/qwen2.5-coder-family/).", +      "context_length": 32768, +      "architecture": { +        "modality": "text-\u003Etext", +        "input_modalities": ["text"], +        "output_modalities": ["text"], +        "tokenizer": "Qwen", +        "instruct_type": "chatml" +      }, +      "pricing": { +        "prompt": "0.00000004", +        "completion": "0.00000016", +        "request": "0", +        "image": "0", +        "web_search": "0", +        "internal_reasoning": "0" +      }, +      "top_provider": { +        "context_length": 32768, +        "max_completion_tokens": 32768, +        "is_moderated": false +      }, +      "per_request_limits": null, +      "supported_parameters": [ +        "frequency_penalty", +        "logit_bias", +        "max_tokens", +        "min_p", +        "presence_penalty", +        "repetition_penalty", +        "response_format", +        "seed", +        "stop", +        "structured_outputs", +        "temperature", +        "top_k", +        "top_p" +      ], +      "default_parameters": {} +    }, +    { +      "id": "raifle/sorcererlm-8x22b", +      "canonical_slug": "raifle/sorcererlm-8x22b", +      "hugging_face_id": "rAIfle/SorcererLM-8x22b-bf16", +      "name": "SorcererLM 8x22B", +      "created": 1731105083, +      "description": "SorcererLM is an advanced RP and storytelling model, built as a Low-rank 16-bit LoRA fine-tuned on [WizardLM-2 8x22B](/microsoft/wizardlm-2-8x22b).\n\n- Advanced reasoning and emotional intelligence for engaging and immersive interactions\n- Vivid writing capabilities enriched with spatial and contextual awareness\n- Enhanced narrative depth, promoting creative and dynamic storytelling", +      "context_length": 16000, +      "architecture": { +        "modality": "text-\u003Etext", +        "input_modalities": ["text"], +        "output_modalities": ["text"], +        "tokenizer": "Mistral", +        "instruct_type": "vicuna" +      }, +      "pricing": { +        "prompt": "0.0000045", +        "completion": "0.0000045", +        "request": "0", +        "image": "0", +        "web_search": "0", +        "internal_reasoning": "0" +      }, +
"top_provider": { + "context_length": 16000, + "max_completion_tokens": null, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "logit_bias", + "max_tokens", + "min_p", + "presence_penalty", + "repetition_penalty", + "seed", + "stop", + "temperature", + "top_k", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "thedrummer/unslopnemo-12b", + "canonical_slug": "thedrummer/unslopnemo-12b", + "hugging_face_id": "TheDrummer/UnslopNemo-12B-v4.1", + "name": "TheDrummer: UnslopNemo 12B", + "created": 1731103448, + "description": "UnslopNemo v4.1 is the latest addition from the creator of Rocinante, designed for adventure writing and role-play scenarios.", + "context_length": 32768, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Mistral", + "instruct_type": "mistral" + }, + "pricing": { + "prompt": "0.0000004", + "completion": "0.0000004", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 32768, + "max_completion_tokens": null, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "max_tokens", + "presence_penalty", + "response_format", + "stop", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "anthropic/claude-3.5-haiku", + "canonical_slug": "anthropic/claude-3-5-haiku", + "hugging_face_id": null, + "name": "Anthropic: Claude 3.5 Haiku", + "created": 1730678400, + "description": "Claude 3.5 Haiku features offers enhanced capabilities in speed, coding accuracy, and tool use. Engineered to excel in real-time applications, it delivers quick response times that are essential for dynamic tasks such as chat interactions and immediate coding suggestions.\n\nThis makes it highly suitable for environments that demand both speed and precision, such as software development, customer service bots, and data management systems.\n\nThis model is currently pointing to [Claude 3.5 Haiku (2024-10-22)](/anthropic/claude-3-5-haiku-20241022).", + "context_length": 200000, + "architecture": { + "modality": "text+image-\u003Etext", + "input_modalities": ["text", "image"], + "output_modalities": ["text"], + "tokenizer": "Claude", + "instruct_type": null + }, + "pricing": { + "prompt": "0.0000008", + "completion": "0.000004", + "request": "0", + "image": "0", + "web_search": "0.01", + "internal_reasoning": "0", + "input_cache_read": "0.00000008", + "input_cache_write": "0.000001" + }, + "top_provider": { + "context_length": 200000, + "max_completion_tokens": 8192, + "is_moderated": true + }, + "per_request_limits": null, + "supported_parameters": [ + "max_tokens", + "stop", + "temperature", + "tool_choice", + "tools", + "top_k", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "anthropic/claude-3.5-haiku-20241022", + "canonical_slug": "anthropic/claude-3-5-haiku-20241022", + "hugging_face_id": null, + "name": "Anthropic: Claude 3.5 Haiku (2024-10-22)", + "created": 1730678400, + "description": "Claude 3.5 Haiku features enhancements across all skill sets including coding, tool use, and reasoning. As the fastest model in the Anthropic lineup, it offers rapid response times suitable for applications that require high interactivity and low latency, such as user-facing chatbots and on-the-fly code completions. 
It also excels in specialized tasks like data extraction and real-time content moderation, making it a versatile tool for a broad range of industries.\n\nIt does not support image inputs.\n\nSee the launch announcement and benchmark results [here](https://www.anthropic.com/news/3-5-models-and-computer-use)", +      "context_length": 200000, +      "architecture": { +        "modality": "text+image-\u003Etext", +        "input_modalities": ["text", "image", "file"], +        "output_modalities": ["text"], +        "tokenizer": "Claude", +        "instruct_type": null +      }, +      "pricing": { +        "prompt": "0.0000008", +        "completion": "0.000004", +        "request": "0", +        "image": "0", +        "web_search": "0", +        "internal_reasoning": "0", +        "input_cache_read": "0.00000008", +        "input_cache_write": "0.000001" +      }, +      "top_provider": { +        "context_length": 200000, +        "max_completion_tokens": 8192, +        "is_moderated": false +      }, +      "per_request_limits": null, +      "supported_parameters": [ +        "max_tokens", +        "stop", +        "temperature", +        "tool_choice", +        "tools", +        "top_k", +        "top_p" +      ], +      "default_parameters": {} +    }, +    { +      "id": "anthracite-org/magnum-v4-72b", +      "canonical_slug": "anthracite-org/magnum-v4-72b", +      "hugging_face_id": "anthracite-org/magnum-v4-72b", +      "name": "Magnum v4 72B", +      "created": 1729555200, +      "description": "This is a series of models designed to replicate the prose quality of the Claude 3 models, specifically [Sonnet](https://openrouter.ai/anthropic/claude-3.5-sonnet) and [Opus](https://openrouter.ai/anthropic/claude-3-opus).\n\nThe model is fine-tuned on top of [Qwen2.5 72B](https://openrouter.ai/qwen/qwen-2.5-72b-instruct).", +      "context_length": 16384, +      "architecture": { +        "modality": "text-\u003Etext", +        "input_modalities": ["text"], +        "output_modalities": ["text"], +        "tokenizer": "Qwen", +        "instruct_type": "chatml" +      }, +      "pricing": { +        "prompt": "0.000003", +        "completion": "0.000005", +        "request": "0", +        "image": "0", +        "web_search": "0", +        "internal_reasoning": "0" +      }, +      "top_provider": { +        "context_length": 16384, +        "max_completion_tokens": 2048, +        "is_moderated": false +      }, +      "per_request_limits": null, +      "supported_parameters": [ +        "frequency_penalty", +        "logit_bias", +        "logprobs", +        "max_tokens", +        "min_p", +        "presence_penalty", +        "repetition_penalty", +        "response_format", +        "seed", +        "stop", +        "temperature", +        "top_a", +        "top_k", +        "top_logprobs", +        "top_p" +      ], +      "default_parameters": {} +    }, +    { +      "id": "anthropic/claude-3.5-sonnet", +      "canonical_slug": "anthropic/claude-3.5-sonnet", +      "hugging_face_id": null, +      "name": "Anthropic: Claude 3.5 Sonnet", +      "created": 1729555200, +      "description": "New Claude 3.5 Sonnet delivers better-than-Opus capabilities, faster-than-Sonnet speeds, at the same Sonnet prices. Sonnet is particularly good at:\n\n- Coding: Scores ~49% on SWE-Bench Verified, higher than the last best score, and without any fancy prompt scaffolding\n- Data science: Augments human data science expertise; navigates unstructured data while using multiple tools for insights\n- Visual processing: excelling at interpreting charts, graphs, and images, accurately transcribing text to derive insights beyond just the text alone\n- Agentic tasks: exceptional tool use, making it great at agentic tasks (i.e.
complex, multi-step problem solving tasks that require engaging with other systems)\n\n#multimodal", + "context_length": 200000, + "architecture": { + "modality": "text+image-\u003Etext", + "input_modalities": ["text", "image", "file"], + "output_modalities": ["text"], + "tokenizer": "Claude", + "instruct_type": null + }, + "pricing": { + "prompt": "0.000003", + "completion": "0.000015", + "request": "0", + "image": "0.0048", + "web_search": "0", + "internal_reasoning": "0", + "input_cache_read": "0.0000003", + "input_cache_write": "0.00000375" + }, + "top_provider": { + "context_length": 200000, + "max_completion_tokens": 8192, + "is_moderated": true + }, + "per_request_limits": null, + "supported_parameters": [ + "max_tokens", + "stop", + "temperature", + "tool_choice", + "tools", + "top_k", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "mistralai/ministral-3b", + "canonical_slug": "mistralai/ministral-3b", + "hugging_face_id": null, + "name": "Mistral: Ministral 3B", + "created": 1729123200, + "description": "Ministral 3B is a 3B parameter model optimized for on-device and edge computing. It excels in knowledge, commonsense reasoning, and function-calling, outperforming larger models like Mistral 7B on most benchmarks. Supporting up to 128k context length, it’s ideal for orchestrating agentic workflows and specialist tasks with efficient inference.", + "context_length": 131072, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Mistral", + "instruct_type": null + }, + "pricing": { + "prompt": "0.00000004", + "completion": "0.00000004", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 131072, + "max_completion_tokens": null, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "max_tokens", + "presence_penalty", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_p" + ], + "default_parameters": { + "temperature": 0.3 + } + }, + { + "id": "mistralai/ministral-8b", + "canonical_slug": "mistralai/ministral-8b", + "hugging_face_id": null, + "name": "Mistral: Ministral 8B", + "created": 1729123200, + "description": "Ministral 8B is an 8B parameter model featuring a unique interleaved sliding-window attention pattern for faster, memory-efficient inference. Designed for edge use cases, it supports up to 128k context length and excels in knowledge and reasoning tasks. 
It outperforms peers in the sub-10B category, making it perfect for low-latency, privacy-first applications.", +      "context_length": 131072, +      "architecture": { +        "modality": "text-\u003Etext", +        "input_modalities": ["text"], +        "output_modalities": ["text"], +        "tokenizer": "Mistral", +        "instruct_type": null +      }, +      "pricing": { +        "prompt": "0.0000001", +        "completion": "0.0000001", +        "request": "0", +        "image": "0", +        "web_search": "0", +        "internal_reasoning": "0" +      }, +      "top_provider": { +        "context_length": 131072, +        "max_completion_tokens": null, +        "is_moderated": false +      }, +      "per_request_limits": null, +      "supported_parameters": [ +        "frequency_penalty", +        "max_tokens", +        "presence_penalty", +        "response_format", +        "seed", +        "stop", +        "structured_outputs", +        "temperature", +        "tool_choice", +        "tools", +        "top_p" +      ], +      "default_parameters": { +        "temperature": 0.3 +      } +    }, +    { +      "id": "qwen/qwen-2.5-7b-instruct", +      "canonical_slug": "qwen/qwen-2.5-7b-instruct", +      "hugging_face_id": "Qwen/Qwen2.5-7B-Instruct", +      "name": "Qwen: Qwen2.5 7B Instruct", +      "created": 1729036800, +      "description": "Qwen2.5 7B is the latest series of Qwen large language models. Qwen2.5 brings the following improvements upon Qwen2:\n\n- Significantly more knowledge and has greatly improved capabilities in coding and mathematics, thanks to our specialized expert models in these domains.\n\n- Significant improvements in instruction following, generating long texts (over 8K tokens), understanding structured data (e.g., tables), and generating structured outputs especially JSON. More resilient to the diversity of system prompts, enhancing role-play implementation and condition-setting for chatbots.\n\n- Long-context Support up to 128K tokens and can generate up to 8K tokens.\n\n- Multilingual support for over 29 languages, including Chinese, English, French, Spanish, Portuguese, German, Italian, Russian, Japanese, Korean, Vietnamese, Thai, Arabic, and more.\n\nUsage of this model is subject to [Tongyi Qianwen LICENSE AGREEMENT](https://huggingface.co/Qwen/Qwen1.5-110B-Chat/blob/main/LICENSE).", +      "context_length": 32768, +      "architecture": { +        "modality": "text-\u003Etext", +        "input_modalities": ["text"], +        "output_modalities": ["text"], +        "tokenizer": "Qwen", +        "instruct_type": "chatml" +      }, +      "pricing": { +        "prompt": "0.00000004", +        "completion": "0.0000001", +        "request": "0", +        "image": "0", +        "web_search": "0", +        "internal_reasoning": "0" +      }, +      "top_provider": { +        "context_length": 32768, +        "max_completion_tokens": null, +        "is_moderated": false +      }, +      "per_request_limits": null, +      "supported_parameters": [ +        "frequency_penalty", +        "logit_bias", +        "max_tokens", +        "min_p", +        "presence_penalty", +        "repetition_penalty", +        "response_format", +        "seed", +        "stop", +        "structured_outputs", +        "temperature", +        "tool_choice", +        "tools", +        "top_k", +        "top_p" +      ], +      "default_parameters": { +        "temperature": null, +        "top_p": null, +        "frequency_penalty": null +      } +    }, +    { +      "id": "nvidia/llama-3.1-nemotron-70b-instruct", +      "canonical_slug": "nvidia/llama-3.1-nemotron-70b-instruct", +      "hugging_face_id": "nvidia/Llama-3.1-Nemotron-70B-Instruct-HF", +      "name": "NVIDIA: Llama 3.1 Nemotron 70B Instruct", +      "created": 1728950400, +      "description": "NVIDIA's Llama 3.1 Nemotron 70B is a language model designed for generating precise and useful responses. Leveraging [Llama 3.1 70B](/models/meta-llama/llama-3.1-70b-instruct) architecture and Reinforcement Learning from Human Feedback (RLHF), it excels in automatic alignment benchmarks.
This model is tailored for applications requiring high accuracy in helpfulness and response generation, suitable for diverse user queries across multiple domains.\n\nUsage of this model is subject to [Meta's Acceptable Use Policy](https://www.llama.com/llama3/use-policy/).", +      "context_length": 131072, +      "architecture": { +        "modality": "text-\u003Etext", +        "input_modalities": ["text"], +        "output_modalities": ["text"], +        "tokenizer": "Llama3", +        "instruct_type": "llama3" +      }, +      "pricing": { +        "prompt": "0.0000006", +        "completion": "0.0000006", +        "request": "0", +        "image": "0", +        "web_search": "0", +        "internal_reasoning": "0" +      }, +      "top_provider": { +        "context_length": 131072, +        "max_completion_tokens": 16384, +        "is_moderated": false +      }, +      "per_request_limits": null, +      "supported_parameters": [ +        "frequency_penalty", +        "max_tokens", +        "min_p", +        "presence_penalty", +        "repetition_penalty", +        "response_format", +        "seed", +        "stop", +        "temperature", +        "tool_choice", +        "tools", +        "top_k", +        "top_p" +      ], +      "default_parameters": {} +    }, +    { +      "id": "inflection/inflection-3-productivity", +      "canonical_slug": "inflection/inflection-3-productivity", +      "hugging_face_id": null, +      "name": "Inflection: Inflection 3 Productivity", +      "created": 1728604800, +      "description": "Inflection 3 Productivity is optimized for following instructions. It is better for tasks requiring JSON output or precise adherence to provided guidelines. It has access to recent news.\n\nFor emotional intelligence similar to Pi, see [Inflection 3 Pi](/inflection/inflection-3-pi)\n\nSee [Inflection's announcement](https://inflection.ai/blog/enterprise) for more details.", +      "context_length": 8000, +      "architecture": { +        "modality": "text-\u003Etext", +        "input_modalities": ["text"], +        "output_modalities": ["text"], +        "tokenizer": "Other", +        "instruct_type": null +      }, +      "pricing": { +        "prompt": "0.0000025", +        "completion": "0.00001", +        "request": "0", +        "image": "0", +        "web_search": "0", +        "internal_reasoning": "0" +      }, +      "top_provider": { +        "context_length": 8000, +        "max_completion_tokens": 1024, +        "is_moderated": false +      }, +      "per_request_limits": null, +      "supported_parameters": ["max_tokens", "stop", "temperature", "top_p"], +      "default_parameters": {} +    }, +    { +      "id": "inflection/inflection-3-pi", +      "canonical_slug": "inflection/inflection-3-pi", +      "hugging_face_id": null, +      "name": "Inflection: Inflection 3 Pi", +      "created": 1728604800, +      "description": "Inflection 3 Pi powers Inflection's [Pi](https://pi.ai) chatbot, including backstory, emotional intelligence, productivity, and safety. It has access to recent news, and excels in scenarios like customer support and roleplay.\n\nPi has been trained to mirror your tone and style; if you use more emojis, so will Pi!
Try experimenting with various prompts and conversation styles.", + "context_length": 8000, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Other", + "instruct_type": null + }, + "pricing": { + "prompt": "0.0000025", + "completion": "0.00001", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 8000, + "max_completion_tokens": 1024, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": ["max_tokens", "stop", "temperature", "top_p"], + "default_parameters": {} + }, + { + "id": "thedrummer/rocinante-12b", + "canonical_slug": "thedrummer/rocinante-12b", + "hugging_face_id": "TheDrummer/Rocinante-12B-v1.1", + "name": "TheDrummer: Rocinante 12B", + "created": 1727654400, + "description": "Rocinante 12B is designed for engaging storytelling and rich prose.\n\nEarly testers have reported:\n- Expanded vocabulary with unique and expressive word choices\n- Enhanced creativity for vivid narratives\n- Adventure-filled and captivating stories", + "context_length": 32768, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Qwen", + "instruct_type": "chatml" + }, + "pricing": { + "prompt": "0.00000017", + "completion": "0.00000043", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 32768, + "max_completion_tokens": null, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "logit_bias", + "max_tokens", + "min_p", + "presence_penalty", + "repetition_penalty", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_k", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "meta-llama/llama-3.2-90b-vision-instruct", + "canonical_slug": "meta-llama/llama-3.2-90b-vision-instruct", + "hugging_face_id": "meta-llama/Llama-3.2-90B-Vision-Instruct", + "name": "Meta: Llama 3.2 90B Vision Instruct", + "created": 1727222400, + "description": "The Llama 90B Vision model is a top-tier, 90-billion-parameter multimodal model designed for the most challenging visual reasoning and language tasks. It offers unparalleled accuracy in image captioning, visual question answering, and advanced image-text comprehension. 
Pre-trained on vast multimodal datasets and fine-tuned with human feedback, the Llama 90B Vision is engineered to handle the most demanding image-based AI tasks.\n\nThis model is perfect for industries requiring cutting-edge multimodal AI capabilities, particularly those dealing with complex, real-time visual and textual analysis.\n\nClick here for the [original model card](https://github.com/meta-llama/llama-models/blob/main/models/llama3_2/MODEL_CARD_VISION.md).\n\nUsage of this model is subject to [Meta's Acceptable Use Policy](https://www.llama.com/llama3/use-policy/).", +      "context_length": 32768, +      "architecture": { +        "modality": "text+image-\u003Etext", +        "input_modalities": ["text", "image"], +        "output_modalities": ["text"], +        "tokenizer": "Llama3", +        "instruct_type": "llama3" +      }, +      "pricing": { +        "prompt": "0.00000035", +        "completion": "0.0000004", +        "request": "0", +        "image": "0.0005058", +        "web_search": "0", +        "internal_reasoning": "0" +      }, +      "top_provider": { +        "context_length": 32768, +        "max_completion_tokens": 16384, +        "is_moderated": false +      }, +      "per_request_limits": null, +      "supported_parameters": [ +        "frequency_penalty", +        "max_tokens", +        "min_p", +        "presence_penalty", +        "repetition_penalty", +        "response_format", +        "seed", +        "stop", +        "temperature", +        "top_k", +        "top_p" +      ], +      "default_parameters": {} +    }, +    { +      "id": "meta-llama/llama-3.2-1b-instruct", +      "canonical_slug": "meta-llama/llama-3.2-1b-instruct", +      "hugging_face_id": "meta-llama/Llama-3.2-1B-Instruct", +      "name": "Meta: Llama 3.2 1B Instruct", +      "created": 1727222400, +      "description": "Llama 3.2 1B is a 1-billion-parameter language model focused on efficiently performing natural language tasks, such as summarization, dialogue, and multilingual text analysis. Its smaller size allows it to operate efficiently in low-resource environments while maintaining strong task performance.\n\nSupporting eight core languages and fine-tunable for more, Llama 3.2 1B is ideal for businesses or developers seeking lightweight yet powerful AI solutions that can operate in diverse multilingual settings without the high computational demand of larger models.\n\nClick here for the [original model card](https://github.com/meta-llama/llama-models/blob/main/models/llama3_2/MODEL_CARD.md).\n\nUsage of this model is subject to [Meta's Acceptable Use Policy](https://www.llama.com/llama3/use-policy/).", +      "context_length": 60000, +      "architecture": { +        "modality": "text-\u003Etext", +        "input_modalities": ["text"], +        "output_modalities": ["text"], +        "tokenizer": "Llama3", +        "instruct_type": "llama3" +      }, +      "pricing": { +        "prompt": "0.000000027", +        "completion": "0.0000002", +        "request": "0", +        "image": "0", +        "web_search": "0", +        "internal_reasoning": "0" +      }, +      "top_provider": { +        "context_length": 60000, +        "max_completion_tokens": null, +        "is_moderated": false +      }, +      "per_request_limits": null, +      "supported_parameters": [ +        "frequency_penalty", +        "max_tokens", +        "presence_penalty", +        "repetition_penalty", +        "seed", +        "temperature", +        "top_k", +        "top_p" +      ], +      "default_parameters": {} +    }, +    { +      "id": "meta-llama/llama-3.2-3b-instruct:free", +      "canonical_slug": "meta-llama/llama-3.2-3b-instruct", +      "hugging_face_id": "meta-llama/Llama-3.2-3B-Instruct", +      "name": "Meta: Llama 3.2 3B Instruct (free)", +      "created": 1727222400, +      "description": "Llama 3.2 3B is a 3-billion-parameter multilingual large language model, optimized for advanced natural language processing tasks like dialogue generation, reasoning, and summarization.
Designed with the latest transformer architecture, it supports eight languages, including English, Spanish, and Hindi, and is adaptable for additional languages.\n\nTrained on 9 trillion tokens, the Llama 3.2 3B model excels in instruction-following, complex reasoning, and tool use. Its balanced performance makes it ideal for applications needing accuracy and efficiency in text generation across multilingual settings.\n\nClick here for the [original model card](https://github.com/meta-llama/llama-models/blob/main/models/llama3_2/MODEL_CARD.md).\n\nUsage of this model is subject to [Meta's Acceptable Use Policy](https://www.llama.com/llama3/use-policy/).", + "context_length": 131072, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Llama3", + "instruct_type": "llama3" + }, + "pricing": { + "prompt": "0", + "completion": "0", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 131072, + "max_completion_tokens": null, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "max_tokens", + "presence_penalty", + "stop", + "temperature", + "top_k", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "meta-llama/llama-3.2-3b-instruct", + "canonical_slug": "meta-llama/llama-3.2-3b-instruct", + "hugging_face_id": "meta-llama/Llama-3.2-3B-Instruct", + "name": "Meta: Llama 3.2 3B Instruct", + "created": 1727222400, + "description": "Llama 3.2 3B is a 3-billion-parameter multilingual large language model, optimized for advanced natural language processing tasks like dialogue generation, reasoning, and summarization. Designed with the latest transformer architecture, it supports eight languages, including English, Spanish, and Hindi, and is adaptable for additional languages.\n\nTrained on 9 trillion tokens, the Llama 3.2 3B model excels in instruction-following, complex reasoning, and tool use. 
Its balanced performance makes it ideal for applications needing accuracy and efficiency in text generation across multilingual settings.\n\nClick here for the [original model card](https://github.com/meta-llama/llama-models/blob/main/models/llama3_2/MODEL_CARD.md).\n\nUsage of this model is subject to [Meta's Acceptable Use Policy](https://www.llama.com/llama3/use-policy/).", + "context_length": 131072, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Llama3", + "instruct_type": "llama3" + }, + "pricing": { + "prompt": "0.00000002", + "completion": "0.00000002", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 131072, + "max_completion_tokens": 16384, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "logit_bias", + "max_tokens", + "min_p", + "presence_penalty", + "repetition_penalty", + "response_format", + "seed", + "stop", + "temperature", + "tool_choice", + "tools", + "top_k", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "meta-llama/llama-3.2-11b-vision-instruct", + "canonical_slug": "meta-llama/llama-3.2-11b-vision-instruct", + "hugging_face_id": "meta-llama/Llama-3.2-11B-Vision-Instruct", + "name": "Meta: Llama 3.2 11B Vision Instruct", + "created": 1727222400, + "description": "Llama 3.2 11B Vision is a multimodal model with 11 billion parameters, designed to handle tasks combining visual and textual data. It excels in tasks such as image captioning and visual question answering, bridging the gap between language generation and visual reasoning. Pre-trained on a massive dataset of image-text pairs, it performs well in complex, high-accuracy image analysis.\n\nIts ability to integrate visual understanding with language processing makes it an ideal solution for industries requiring comprehensive visual-linguistic AI applications, such as content creation, AI-driven customer service, and research.\n\nClick here for the [original model card](https://github.com/meta-llama/llama-models/blob/main/models/llama3_2/MODEL_CARD_VISION.md).\n\nUsage of this model is subject to [Meta's Acceptable Use Policy](https://www.llama.com/llama3/use-policy/).", + "context_length": 131072, + "architecture": { + "modality": "text+image-\u003Etext", + "input_modalities": ["text", "image"], + "output_modalities": ["text"], + "tokenizer": "Llama3", + "instruct_type": "llama3" + }, + "pricing": { + "prompt": "0.000000049", + "completion": "0.000000049", + "request": "0", + "image": "0.00007948", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 131072, + "max_completion_tokens": 16384, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "logit_bias", + "max_tokens", + "min_p", + "presence_penalty", + "repetition_penalty", + "response_format", + "seed", + "stop", + "temperature", + "top_k", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "qwen/qwen-2.5-72b-instruct:free", + "canonical_slug": "qwen/qwen-2.5-72b-instruct", + "hugging_face_id": "Qwen/Qwen2.5-72B-Instruct", + "name": "Qwen2.5 72B Instruct (free)", + "created": 1726704000, + "description": "Qwen2.5 72B is the latest series of Qwen large language models. 
Qwen2.5 brings the following improvements upon Qwen2:\n\n- Significantly more knowledge and has greatly improved capabilities in coding and mathematics, thanks to our specialized expert models in these domains.\n\n- Significant improvements in instruction following, generating long texts (over 8K tokens), understanding structured data (e.g., tables), and generating structured outputs especially JSON. More resilient to the diversity of system prompts, enhancing role-play implementation and condition-setting for chatbots.\n\n- Long-context Support up to 128K tokens and can generate up to 8K tokens.\n\n- Multilingual support for over 29 languages, including Chinese, English, French, Spanish, Portuguese, German, Italian, Russian, Japanese, Korean, Vietnamese, Thai, Arabic, and more.\n\nUsage of this model is subject to [Tongyi Qianwen LICENSE AGREEMENT](https://huggingface.co/Qwen/Qwen1.5-110B-Chat/blob/main/LICENSE).", +      "context_length": 32768, +      "architecture": { +        "modality": "text-\u003Etext", +        "input_modalities": ["text"], +        "output_modalities": ["text"], +        "tokenizer": "Qwen", +        "instruct_type": "chatml" +      }, +      "pricing": { +        "prompt": "0", +        "completion": "0", +        "request": "0", +        "image": "0", +        "web_search": "0", +        "internal_reasoning": "0" +      }, +      "top_provider": { +        "context_length": 32768, +        "max_completion_tokens": null, +        "is_moderated": false +      }, +      "per_request_limits": null, +      "supported_parameters": [ +        "frequency_penalty", +        "max_tokens", +        "presence_penalty", +        "repetition_penalty", +        "seed", +        "stop", +        "temperature", +        "top_k", +        "top_p" +      ], +      "default_parameters": {} +    }, +    { +      "id": "qwen/qwen-2.5-72b-instruct", +      "canonical_slug": "qwen/qwen-2.5-72b-instruct", +      "hugging_face_id": "Qwen/Qwen2.5-72B-Instruct", +      "name": "Qwen2.5 72B Instruct", +      "created": 1726704000, +      "description": "Qwen2.5 72B is the latest series of Qwen large language models. Qwen2.5 brings the following improvements upon Qwen2:\n\n- Significantly more knowledge and has greatly improved capabilities in coding and mathematics, thanks to our specialized expert models in these domains.\n\n- Significant improvements in instruction following, generating long texts (over 8K tokens), understanding structured data (e.g., tables), and generating structured outputs especially JSON.
More resilient to the diversity of system prompts, enhancing role-play implementation and condition-setting for chatbots.\n\n- Long-context Support up to 128K tokens and can generate up to 8K tokens.\n\n- Multilingual support for over 29 languages, including Chinese, English, French, Spanish, Portuguese, German, Italian, Russian, Japanese, Korean, Vietnamese, Thai, Arabic, and more.\n\nUsage of this model is subject to [Tongyi Qianwen LICENSE AGREEMENT](https://huggingface.co/Qwen/Qwen1.5-110B-Chat/blob/main/LICENSE).", +      "context_length": 32768, +      "architecture": { +        "modality": "text-\u003Etext", +        "input_modalities": ["text"], +        "output_modalities": ["text"], +        "tokenizer": "Qwen", +        "instruct_type": "chatml" +      }, +      "pricing": { +        "prompt": "0.00000007", +        "completion": "0.00000026", +        "request": "0", +        "image": "0", +        "web_search": "0", +        "internal_reasoning": "0" +      }, +      "top_provider": { +        "context_length": 32768, +        "max_completion_tokens": 32768, +        "is_moderated": false +      }, +      "per_request_limits": null, +      "supported_parameters": [ +        "frequency_penalty", +        "logit_bias", +        "max_tokens", +        "min_p", +        "presence_penalty", +        "repetition_penalty", +        "response_format", +        "seed", +        "stop", +        "structured_outputs", +        "temperature", +        "tool_choice", +        "tools", +        "top_k", +        "top_p" +      ], +      "default_parameters": {} +    }, +    { +      "id": "neversleep/llama-3.1-lumimaid-8b", +      "canonical_slug": "neversleep/llama-3.1-lumimaid-8b", +      "hugging_face_id": "NeverSleep/Lumimaid-v0.2-8B", +      "name": "NeverSleep: Lumimaid v0.2 8B", +      "created": 1726358400, +      "description": "Lumimaid v0.2 8B is a finetune of [Llama 3.1 8B](/models/meta-llama/llama-3.1-8b-instruct) with a \"HUGE step up dataset wise\" compared to Lumimaid v0.1. Sloppy chat outputs were purged.\n\nUsage of this model is subject to [Meta's Acceptable Use Policy](https://llama.meta.com/llama3/use-policy/).", +      "context_length": 32768, +      "architecture": { +        "modality": "text-\u003Etext", +        "input_modalities": ["text"], +        "output_modalities": ["text"], +        "tokenizer": "Llama3", +        "instruct_type": "llama3" +      }, +      "pricing": { +        "prompt": "0.00000009", +        "completion": "0.0000006", +        "request": "0", +        "image": "0", +        "web_search": "0", +        "internal_reasoning": "0" +      }, +      "top_provider": { +        "context_length": 32768, +        "max_completion_tokens": null, +        "is_moderated": false +      }, +      "per_request_limits": null, +      "supported_parameters": [ +        "frequency_penalty", +        "logit_bias", +        "logprobs", +        "max_tokens", +        "min_p", +        "presence_penalty", +        "repetition_penalty", +        "response_format", +        "seed", +        "stop", +        "structured_outputs", +        "temperature", +        "top_a", +        "top_k", +        "top_logprobs", +        "top_p" +      ], +      "default_parameters": {} +    }, +    { +      "id": "mistralai/pixtral-12b", +      "canonical_slug": "mistralai/pixtral-12b", +      "hugging_face_id": "mistralai/Pixtral-12B-2409", +      "name": "Mistral: Pixtral 12B", +      "created": 1725926400, +      "description": "The first multi-modal, text+image-to-text model from Mistral AI.
Its weights were launched via torrent: https://x.com/mistralai/status/1833758285167722836.", + "context_length": 32768, + "architecture": { + "modality": "text+image-\u003Etext", + "input_modalities": ["text", "image"], + "output_modalities": ["text"], + "tokenizer": "Mistral", + "instruct_type": null + }, + "pricing": { + "prompt": "0.0000001", + "completion": "0.0000001", + "request": "0", + "image": "0.0001445", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 32768, + "max_completion_tokens": null, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "logit_bias", + "max_tokens", + "min_p", + "presence_penalty", + "repetition_penalty", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_k", + "top_p" + ], + "default_parameters": { + "temperature": 0.3 + } + }, + { + "id": "cohere/command-r-08-2024", + "canonical_slug": "cohere/command-r-08-2024", + "hugging_face_id": null, + "name": "Cohere: Command R (08-2024)", + "created": 1724976000, + "description": "command-r-08-2024 is an update of the [Command R](/models/cohere/command-r) with improved performance for multilingual retrieval-augmented generation (RAG) and tool use. More broadly, it is better at math, code and reasoning and is competitive with the previous version of the larger Command R+ model.\n\nRead the launch post [here](https://docs.cohere.com/changelog/command-gets-refreshed).\n\nUse of this model is subject to Cohere's [Usage Policy](https://docs.cohere.com/docs/usage-policy) and [SaaS Agreement](https://cohere.com/saas-agreement).", + "context_length": 128000, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Cohere", + "instruct_type": null + }, + "pricing": { + "prompt": "0.00000015", + "completion": "0.0000006", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 128000, + "max_completion_tokens": 4000, + "is_moderated": true + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "max_tokens", + "presence_penalty", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_k", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "cohere/command-r-plus-08-2024", + "canonical_slug": "cohere/command-r-plus-08-2024", + "hugging_face_id": null, + "name": "Cohere: Command R+ (08-2024)", + "created": 1724976000, + "description": "command-r-plus-08-2024 is an update of the [Command R+](/models/cohere/command-r-plus) with roughly 50% higher throughput and 25% lower latencies as compared to the previous Command R+ version, while keeping the hardware footprint the same.\n\nRead the launch post [here](https://docs.cohere.com/changelog/command-gets-refreshed).\n\nUse of this model is subject to Cohere's [Usage Policy](https://docs.cohere.com/docs/usage-policy) and [SaaS Agreement](https://cohere.com/saas-agreement).", + "context_length": 128000, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Cohere", + "instruct_type": null + }, + "pricing": { + "prompt": "0.0000025", + "completion": "0.00001", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 128000, 
+ "max_completion_tokens": 4000, + "is_moderated": true + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "max_tokens", + "presence_penalty", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_k", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "sao10k/l3.1-euryale-70b", + "canonical_slug": "sao10k/l3.1-euryale-70b", + "hugging_face_id": "Sao10K/L3.1-70B-Euryale-v2.2", + "name": "Sao10K: Llama 3.1 Euryale 70B v2.2", + "created": 1724803200, + "description": "Euryale L3.1 70B v2.2 is a model focused on creative roleplay from [Sao10k](https://ko-fi.com/sao10k). It is the successor of [Euryale L3 70B v2.1](/models/sao10k/l3-euryale-70b).", + "context_length": 32768, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Llama3", + "instruct_type": "llama3" + }, + "pricing": { + "prompt": "0.00000065", + "completion": "0.00000075", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 32768, + "max_completion_tokens": null, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "max_tokens", + "min_p", + "presence_penalty", + "repetition_penalty", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_k", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "qwen/qwen-2.5-vl-7b-instruct", + "canonical_slug": "qwen/qwen-2-vl-7b-instruct", + "hugging_face_id": "Qwen/Qwen2.5-VL-7B-Instruct", + "name": "Qwen: Qwen2.5-VL 7B Instruct", + "created": 1724803200, + "description": "Qwen2.5 VL 7B is a multimodal LLM from the Qwen Team with the following key enhancements:\n\n- SoTA understanding of images of various resolution & ratio: Qwen2.5-VL achieves state-of-the-art performance on visual understanding benchmarks, including MathVista, DocVQA, RealWorldQA, MTVQA, etc.\n\n- Understanding videos of 20min+: Qwen2.5-VL can understand videos over 20 minutes for high-quality video-based question answering, dialog, content creation, etc.\n\n- Agent that can operate your mobiles, robots, etc.: with the abilities of complex reasoning and decision making, Qwen2.5-VL can be integrated with devices like mobile phones, robots, etc., for automatic operation based on visual environment and text instructions.\n\n- Multilingual Support: to serve global users, besides English and Chinese, Qwen2.5-VL now supports the understanding of texts in different languages inside images, including most European languages, Japanese, Korean, Arabic, Vietnamese, etc.\n\nFor more details, see this [blog post](https://qwenlm.github.io/blog/qwen2-vl/) and [GitHub repo](https://github.com/QwenLM/Qwen2-VL).\n\nUsage of this model is subject to [Tongyi Qianwen LICENSE AGREEMENT](https://huggingface.co/Qwen/Qwen1.5-110B-Chat/blob/main/LICENSE).", + "context_length": 32768, + "architecture": { + "modality": "text+image-\u003Etext", + "input_modalities": ["text", "image"], + "output_modalities": ["text"], + "tokenizer": "Qwen", + "instruct_type": null + }, + "pricing": { + "prompt": "0.0000002", + "completion": "0.0000002", + "request": "0", + "image": "0.0001445", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 32768, + "max_completion_tokens": null, + "is_moderated": false + }, + "per_request_limits": null, + 
"supported_parameters": [ + "frequency_penalty", + "logit_bias", + "max_tokens", + "min_p", + "presence_penalty", + "repetition_penalty", + "seed", + "stop", + "temperature", + "top_k", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "microsoft/phi-3.5-mini-128k-instruct", + "canonical_slug": "microsoft/phi-3.5-mini-128k-instruct", + "hugging_face_id": "microsoft/Phi-3.5-mini-instruct", + "name": "Microsoft: Phi-3.5 Mini 128K Instruct", + "created": 1724198400, + "description": "Phi-3.5 models are lightweight, state-of-the-art open models. These models were trained with Phi-3 datasets that include both synthetic data and the filtered, publicly available websites data, with a focus on high quality and reasoning-dense properties. Phi-3.5 Mini uses 3.8B parameters, and is a dense decoder-only transformer model using the same tokenizer as [Phi-3 Mini](/models/microsoft/phi-3-mini-128k-instruct).\n\nThe models underwent a rigorous enhancement process, incorporating both supervised fine-tuning, proximal policy optimization, and direct preference optimization to ensure precise instruction adherence and robust safety measures. When assessed against benchmarks that test common sense, language understanding, math, code, long context and logical reasoning, Phi-3.5 models showcased robust and state-of-the-art performance among models with less than 13 billion parameters.", + "context_length": 128000, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Other", + "instruct_type": "phi3" + }, + "pricing": { + "prompt": "0.0000001", + "completion": "0.0000001", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 128000, + "max_completion_tokens": null, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "max_tokens", + "temperature", + "tool_choice", + "tools", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "nousresearch/hermes-3-llama-3.1-70b", + "canonical_slug": "nousresearch/hermes-3-llama-3.1-70b", + "hugging_face_id": "NousResearch/Hermes-3-Llama-3.1-70B", + "name": "Nous: Hermes 3 70B Instruct", + "created": 1723939200, + "description": "Hermes 3 is a generalist language model with many improvements over [Hermes 2](/models/nousresearch/nous-hermes-2-mistral-7b-dpo), including advanced agentic capabilities, much better roleplaying, reasoning, multi-turn conversation, long context coherence, and improvements across the board.\n\nHermes 3 70B is a competitive, if not superior finetune of the [Llama-3.1 70B foundation model](/models/meta-llama/llama-3.1-70b-instruct), focused on aligning LLMs to the user, with powerful steering capabilities and control given to the end user.\n\nThe Hermes 3 series builds and expands on the Hermes 2 set of capabilities, including more powerful and reliable function calling and structured output capabilities, generalist assistant capabilities, and improved code generation skills.", + "context_length": 65536, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Llama3", + "instruct_type": "chatml" + }, + "pricing": { + "prompt": "0.0000003", + "completion": "0.0000003", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 65536, + "max_completion_tokens": null, + "is_moderated": false + }, + 
"per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "logit_bias", + "max_tokens", + "min_p", + "presence_penalty", + "repetition_penalty", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_k", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "nousresearch/hermes-3-llama-3.1-405b:free", + "canonical_slug": "nousresearch/hermes-3-llama-3.1-405b", + "hugging_face_id": "NousResearch/Hermes-3-Llama-3.1-405B", + "name": "Nous: Hermes 3 405B Instruct (free)", + "created": 1723766400, + "description": "Hermes 3 is a generalist language model with many improvements over Hermes 2, including advanced agentic capabilities, much better roleplaying, reasoning, multi-turn conversation, long context coherence, and improvements across the board.\n\nHermes 3 405B is a frontier-level, full-parameter finetune of the Llama-3.1 405B foundation model, focused on aligning LLMs to the user, with powerful steering capabilities and control given to the end user.\n\nThe Hermes 3 series builds and expands on the Hermes 2 set of capabilities, including more powerful and reliable function calling and structured output capabilities, generalist assistant capabilities, and improved code generation skills.\n\nHermes 3 is competitive, if not superior, to Llama-3.1 Instruct models at general capabilities, with varying strengths and weaknesses attributable between the two.", + "context_length": 131072, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Llama3", + "instruct_type": "chatml" + }, + "pricing": { + "prompt": "0", + "completion": "0", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 131072, + "max_completion_tokens": null, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "max_tokens", + "presence_penalty", + "stop", + "temperature", + "top_k", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "nousresearch/hermes-3-llama-3.1-405b", + "canonical_slug": "nousresearch/hermes-3-llama-3.1-405b", + "hugging_face_id": "NousResearch/Hermes-3-Llama-3.1-405B", + "name": "Nous: Hermes 3 405B Instruct", + "created": 1723766400, + "description": "Hermes 3 is a generalist language model with many improvements over Hermes 2, including advanced agentic capabilities, much better roleplaying, reasoning, multi-turn conversation, long context coherence, and improvements across the board.\n\nHermes 3 405B is a frontier-level, full-parameter finetune of the Llama-3.1 405B foundation model, focused on aligning LLMs to the user, with powerful steering capabilities and control given to the end user.\n\nThe Hermes 3 series builds and expands on the Hermes 2 set of capabilities, including more powerful and reliable function calling and structured output capabilities, generalist assistant capabilities, and improved code generation skills.\n\nHermes 3 is competitive, if not superior, to Llama-3.1 Instruct models at general capabilities, with varying strengths and weaknesses attributable between the two.", + "context_length": 131072, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Llama3", + "instruct_type": "chatml" + }, + "pricing": { + "prompt": "0.000001", + "completion": "0.000001", + "request": "0", + "image": "0", + 
"web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 131072, + "max_completion_tokens": 16384, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "max_tokens", + "min_p", + "presence_penalty", + "repetition_penalty", + "response_format", + "seed", + "stop", + "temperature", + "top_k", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "openai/chatgpt-4o-latest", + "canonical_slug": "openai/chatgpt-4o-latest", + "hugging_face_id": null, + "name": "OpenAI: ChatGPT-4o", + "created": 1723593600, + "description": "OpenAI ChatGPT 4o is continually updated by OpenAI to point to the current version of GPT-4o used by ChatGPT. It therefore differs slightly from the API version of [GPT-4o](/models/openai/gpt-4o) in that it has additional RLHF. It is intended for research and evaluation.\n\nOpenAI notes that this model is not suited for production use-cases as it may be removed or redirected to another model in the future.", + "context_length": 128000, + "architecture": { + "modality": "text+image-\u003Etext", + "input_modalities": ["text", "image"], + "output_modalities": ["text"], + "tokenizer": "GPT", + "instruct_type": null + }, + "pricing": { + "prompt": "0.000005", + "completion": "0.000015", + "request": "0", + "image": "0.007225", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 128000, + "max_completion_tokens": 16384, + "is_moderated": true + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "logit_bias", + "logprobs", + "max_tokens", + "presence_penalty", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "top_logprobs", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "sao10k/l3-lunaris-8b", + "canonical_slug": "sao10k/l3-lunaris-8b", + "hugging_face_id": "Sao10K/L3-8B-Lunaris-v1", + "name": "Sao10K: Llama 3 8B Lunaris", + "created": 1723507200, + "description": "Lunaris 8B is a versatile generalist and roleplaying model based on Llama 3. 
It's a strategic merge of multiple models, designed to balance creativity with improved logic and general knowledge.\n\nCreated by [Sao10k](https://huggingface.co/Sao10k), this model aims to offer an improved experience over Stheno v3.2, with enhanced creativity and logical reasoning.\n\nFor best results, use with Llama 3 Instruct context template, temperature 1.4, and min_p 0.1.", + "context_length": 8192, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Llama3", + "instruct_type": "llama3" + }, + "pricing": { + "prompt": "0.00000004", + "completion": "0.00000005", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 8192, + "max_completion_tokens": null, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "max_tokens", + "min_p", + "presence_penalty", + "repetition_penalty", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "top_k", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "openai/gpt-4o-2024-08-06", + "canonical_slug": "openai/gpt-4o-2024-08-06", + "hugging_face_id": null, + "name": "OpenAI: GPT-4o (2024-08-06)", + "created": 1722902400, + "description": "The 2024-08-06 version of GPT-4o offers improved performance in structured outputs, with the ability to supply a JSON schema in the response_format. Read more [here](https://openai.com/index/introducing-structured-outputs-in-the-api/).\n\nGPT-4o (\"o\" for \"omni\") is OpenAI's latest AI model, supporting both text and image inputs with text outputs. It maintains the intelligence level of [GPT-4 Turbo](/models/openai/gpt-4-turbo) while being twice as fast and 50% more cost-effective. GPT-4o also offers improved performance in processing non-English languages and enhanced visual capabilities.\n\nFor benchmarking against other models, it was briefly called [\"im-also-a-good-gpt2-chatbot\"](https://twitter.com/LiamFedus/status/1790064963966370209)", + "context_length": 128000, + "architecture": { + "modality": "text+image-\u003Etext", + "input_modalities": ["text", "image", "file"], + "output_modalities": ["text"], + "tokenizer": "GPT", + "instruct_type": null + }, + "pricing": { + "prompt": "0.0000025", + "completion": "0.00001", + "request": "0", + "image": "0.003613", + "web_search": "0", + "internal_reasoning": "0", + "input_cache_read": "0.00000125" + }, + "top_provider": { + "context_length": 128000, + "max_completion_tokens": 16384, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "logit_bias", + "logprobs", + "max_tokens", + "presence_penalty", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_logprobs", + "top_p", + "web_search_options" + ], + "default_parameters": {} + }, + { + "id": "meta-llama/llama-3.1-405b", + "canonical_slug": "meta-llama/llama-3.1-405b", + "hugging_face_id": "meta-llama/llama-3.1-405B", + "name": "Meta: Llama 3.1 405B (base)", + "created": 1722556800, + "description": "Meta's latest class of model (Llama 3.1) launched with a variety of sizes & flavors. This is the base 405B pre-trained version.\n\nIt has demonstrated strong performance compared to leading closed-source models in human evaluations.\n\nTo read more about the model release, [click here](https://ai.meta.com/blog/meta-llama-3/). 
Usage of this model is subject to [Meta's Acceptable Use Policy](https://llama.meta.com/llama3/use-policy/).", + "context_length": 32768, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Llama3", + "instruct_type": "none" + }, + "pricing": { + "prompt": "0.000004", + "completion": "0.000004", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 32768, + "max_completion_tokens": 32768, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "logit_bias", + "max_tokens", + "min_p", + "presence_penalty", + "repetition_penalty", + "seed", + "stop", + "temperature", + "top_k", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "meta-llama/llama-3.1-70b-instruct", + "canonical_slug": "meta-llama/llama-3.1-70b-instruct", + "hugging_face_id": "meta-llama/Meta-Llama-3.1-70B-Instruct", + "name": "Meta: Llama 3.1 70B Instruct", + "created": 1721692800, + "description": "Meta's latest class of model (Llama 3.1) launched with a variety of sizes & flavors. This 70B instruct-tuned version is optimized for high quality dialogue usecases.\n\nIt has demonstrated strong performance compared to leading closed-source models in human evaluations.\n\nTo read more about the model release, [click here](https://ai.meta.com/blog/meta-llama-3-1/). Usage of this model is subject to [Meta's Acceptable Use Policy](https://llama.meta.com/llama3/use-policy/).", + "context_length": 131072, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Llama3", + "instruct_type": "llama3" + }, + "pricing": { + "prompt": "0.0000004", + "completion": "0.0000004", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 131072, + "max_completion_tokens": null, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "logit_bias", + "max_tokens", + "min_p", + "presence_penalty", + "repetition_penalty", + "response_format", + "seed", + "stop", + "temperature", + "tool_choice", + "tools", + "top_k", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "meta-llama/llama-3.1-405b-instruct", + "canonical_slug": "meta-llama/llama-3.1-405b-instruct", + "hugging_face_id": "meta-llama/Meta-Llama-3.1-405B-Instruct", + "name": "Meta: Llama 3.1 405B Instruct", + "created": 1721692800, + "description": "The highly anticipated 400B class of Llama3 is here! Clocking in at 128k context with impressive eval scores, the Meta AI team continues to push the frontier of open-source LLMs.\n\nMeta's latest class of model (Llama 3.1) launched with a variety of sizes & flavors. This 405B instruct-tuned version is optimized for high quality dialogue usecases.\n\nIt has demonstrated strong performance compared to leading closed-source models including GPT-4o and Claude 3.5 Sonnet in evaluations.\n\nTo read more about the model release, [click here](https://ai.meta.com/blog/meta-llama-3-1/). 
Usage of this model is subject to [Meta's Acceptable Use Policy](https://llama.meta.com/llama3/use-policy/).", + "context_length": 130815, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Llama3", + "instruct_type": "llama3" + }, + "pricing": { + "prompt": "0.0000035", + "completion": "0.0000035", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 130815, + "max_completion_tokens": null, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "logit_bias", + "max_tokens", + "min_p", + "presence_penalty", + "repetition_penalty", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_k", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "meta-llama/llama-3.1-8b-instruct", + "canonical_slug": "meta-llama/llama-3.1-8b-instruct", + "hugging_face_id": "meta-llama/Meta-Llama-3.1-8B-Instruct", + "name": "Meta: Llama 3.1 8B Instruct", + "created": 1721692800, + "description": "Meta's latest class of model (Llama 3.1) launched with a variety of sizes & flavors. This 8B instruct-tuned version is fast and efficient.\n\nIt has demonstrated strong performance compared to leading closed-source models in human evaluations.\n\nTo read more about the model release, [click here](https://ai.meta.com/blog/meta-llama-3-1/). Usage of this model is subject to [Meta's Acceptable Use Policy](https://llama.meta.com/llama3/use-policy/).", + "context_length": 131072, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Llama3", + "instruct_type": "llama3" + }, + "pricing": { + "prompt": "0.00000002", + "completion": "0.00000003", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 131072, + "max_completion_tokens": 16384, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "logit_bias", + "logprobs", + "max_tokens", + "min_p", + "presence_penalty", + "repetition_penalty", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_k", + "top_logprobs", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "mistralai/mistral-nemo:free", + "canonical_slug": "mistralai/mistral-nemo", + "hugging_face_id": "mistralai/Mistral-Nemo-Instruct-2407", + "name": "Mistral: Mistral Nemo (free)", + "created": 1721347200, + "description": "A 12B parameter model with a 128k token context length built by Mistral in collaboration with NVIDIA.\n\nThe model is multilingual, supporting English, French, German, Spanish, Italian, Portuguese, Chinese, Japanese, Korean, Arabic, and Hindi.\n\nIt supports function calling and is released under the Apache 2.0 license.", + "context_length": 131072, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Mistral", + "instruct_type": "mistral" + }, + "pricing": { + "prompt": "0", + "completion": "0", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 131072, + "max_completion_tokens": 128000, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + 
"frequency_penalty", + "max_tokens", + "presence_penalty", + "repetition_penalty", + "seed", + "stop", + "temperature", + "top_k", + "top_p" + ], + "default_parameters": { + "temperature": 0.3 + } + }, + { + "id": "mistralai/mistral-nemo", + "canonical_slug": "mistralai/mistral-nemo", + "hugging_face_id": "mistralai/Mistral-Nemo-Instruct-2407", + "name": "Mistral: Mistral Nemo", + "created": 1721347200, + "description": "A 12B parameter model with a 128k token context length built by Mistral in collaboration with NVIDIA.\n\nThe model is multilingual, supporting English, French, German, Spanish, Italian, Portuguese, Chinese, Japanese, Korean, Arabic, and Hindi.\n\nIt supports function calling and is released under the Apache 2.0 license.", + "context_length": 131072, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Mistral", + "instruct_type": "mistral" + }, + "pricing": { + "prompt": "0.00000002", + "completion": "0.00000004", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 131072, + "max_completion_tokens": 16384, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "max_tokens", + "min_p", + "presence_penalty", + "repetition_penalty", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_k", + "top_p" + ], + "default_parameters": { + "temperature": 0.3 + } + }, + { + "id": "openai/gpt-4o-mini", + "canonical_slug": "openai/gpt-4o-mini", + "hugging_face_id": null, + "name": "OpenAI: GPT-4o-mini", + "created": 1721260800, + "description": "GPT-4o mini is OpenAI's newest model after [GPT-4 Omni](/models/openai/gpt-4o), supporting both text and image inputs with text outputs.\n\nAs their most advanced small model, it is many multiples more affordable than other recent frontier models, and more than 60% cheaper than [GPT-3.5 Turbo](/models/openai/gpt-3.5-turbo). 
It maintains SOTA intelligence, while being significantly more cost-effective.\n\nGPT-4o mini achieves an 82% score on MMLU and presently ranks higher than GPT-4 on chat preferences [common leaderboards](https://arena.lmsys.org/).\n\nCheck out the [launch announcement](https://openai.com/index/gpt-4o-mini-advancing-cost-efficient-intelligence/) to learn more.\n\n#multimodal", + "context_length": 128000, + "architecture": { + "modality": "text+image-\u003Etext", + "input_modalities": ["text", "image", "file"], + "output_modalities": ["text"], + "tokenizer": "GPT", + "instruct_type": null + }, + "pricing": { + "prompt": "0.00000015", + "completion": "0.0000006", + "request": "0", + "image": "0.000217", + "web_search": "0", + "internal_reasoning": "0", + "input_cache_read": "0.000000075" + }, + "top_provider": { + "context_length": 128000, + "max_completion_tokens": 16384, + "is_moderated": true + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "logit_bias", + "logprobs", + "max_tokens", + "presence_penalty", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_logprobs", + "top_p", + "web_search_options" + ], + "default_parameters": {} + }, + { + "id": "openai/gpt-4o-mini-2024-07-18", + "canonical_slug": "openai/gpt-4o-mini-2024-07-18", + "hugging_face_id": null, + "name": "OpenAI: GPT-4o-mini (2024-07-18)", + "created": 1721260800, + "description": "GPT-4o mini is OpenAI's newest model after [GPT-4 Omni](/models/openai/gpt-4o), supporting both text and image inputs with text outputs.\n\nAs their most advanced small model, it is many multiples more affordable than other recent frontier models, and more than 60% cheaper than [GPT-3.5 Turbo](/models/openai/gpt-3.5-turbo). 
It maintains SOTA intelligence, while being significantly more cost-effective.\n\nGPT-4o mini achieves an 82% score on MMLU and presently ranks higher than GPT-4 on chat preferences [common leaderboards](https://arena.lmsys.org/).\n\nCheck out the [launch announcement](https://openai.com/index/gpt-4o-mini-advancing-cost-efficient-intelligence/) to learn more.\n\n#multimodal", + "context_length": 128000, + "architecture": { + "modality": "text+image-\u003Etext", + "input_modalities": ["text", "image", "file"], + "output_modalities": ["text"], + "tokenizer": "GPT", + "instruct_type": null + }, + "pricing": { + "prompt": "0.00000015", + "completion": "0.0000006", + "request": "0", + "image": "0.007225", + "web_search": "0", + "internal_reasoning": "0", + "input_cache_read": "0.000000075" + }, + "top_provider": { + "context_length": 128000, + "max_completion_tokens": 16384, + "is_moderated": true + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "logit_bias", + "logprobs", + "max_tokens", + "presence_penalty", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_logprobs", + "top_p", + "web_search_options" + ], + "default_parameters": {} + }, + { + "id": "google/gemma-2-27b-it", + "canonical_slug": "google/gemma-2-27b-it", + "hugging_face_id": "google/gemma-2-27b-it", + "name": "Google: Gemma 2 27B", + "created": 1720828800, + "description": "Gemma 2 27B by Google is an open model built from the same research and technology used to create the [Gemini models](/models?q=gemini).\n\nGemma models are well-suited for a variety of text generation tasks, including question answering, summarization, and reasoning.\n\nSee the [launch announcement](https://blog.google/technology/developers/google-gemma-2/) for more details. Usage of Gemma is subject to Google's [Gemma Terms of Use](https://ai.google.dev/gemma/terms).", + "context_length": 8192, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Gemini", + "instruct_type": "gemma" + }, + "pricing": { + "prompt": "0.00000065", + "completion": "0.00000065", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 8192, + "max_completion_tokens": null, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "max_tokens", + "presence_penalty", + "response_format", + "stop", + "structured_outputs", + "temperature", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "google/gemma-2-9b-it", + "canonical_slug": "google/gemma-2-9b-it", + "hugging_face_id": "google/gemma-2-9b-it", + "name": "Google: Gemma 2 9B", + "created": 1719532800, + "description": "Gemma 2 9B by Google is an advanced, open-source language model that sets a new standard for efficiency and performance in its size class.\n\nDesigned for a wide variety of tasks, it empowers developers and researchers to build innovative applications, while maintaining accessibility, safety, and cost-effectiveness.\n\nSee the [launch announcement](https://blog.google/technology/developers/google-gemma-2/) for more details. 
Usage of Gemma is subject to Google's [Gemma Terms of Use](https://ai.google.dev/gemma/terms).", + "context_length": 8192, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Gemini", + "instruct_type": "gemma" + }, + "pricing": { + "prompt": "0.00000003", + "completion": "0.00000009", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 8192, + "max_completion_tokens": null, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "max_tokens", + "presence_penalty", + "repetition_penalty", + "temperature", + "top_k", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "anthropic/claude-3.5-sonnet-20240620", + "canonical_slug": "anthropic/claude-3.5-sonnet-20240620", + "hugging_face_id": null, + "name": "Anthropic: Claude 3.5 Sonnet (2024-06-20)", + "created": 1718841600, + "description": "Claude 3.5 Sonnet delivers better-than-Opus capabilities, faster-than-Sonnet speeds, at the same Sonnet prices. Sonnet is particularly good at:\n\n- Coding: Autonomously writes, edits, and runs code with reasoning and troubleshooting\n- Data science: Augments human data science expertise; navigates unstructured data while using multiple tools for insights\n- Visual processing: excelling at interpreting charts, graphs, and images, accurately transcribing text to derive insights beyond just the text alone\n- Agentic tasks: exceptional tool use, making it great at agentic tasks (i.e. complex, multi-step problem solving tasks that require engaging with other systems)\n\nFor the latest version (2024-10-23), check out [Claude 3.5 Sonnet](/anthropic/claude-3.5-sonnet).\n\n#multimodal", + "context_length": 200000, + "architecture": { + "modality": "text+image-\u003Etext", + "input_modalities": ["text", "image", "file"], + "output_modalities": ["text"], + "tokenizer": "Claude", + "instruct_type": null + }, + "pricing": { + "prompt": "0.000003", + "completion": "0.000015", + "request": "0", + "image": "0.0048", + "web_search": "0", + "internal_reasoning": "0", + "input_cache_read": "0.0000003", + "input_cache_write": "0.00000375" + }, + "top_provider": { + "context_length": 200000, + "max_completion_tokens": 8192, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "max_tokens", + "stop", + "temperature", + "tool_choice", + "tools", + "top_k", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "sao10k/l3-euryale-70b", + "canonical_slug": "sao10k/l3-euryale-70b", + "hugging_face_id": "Sao10K/L3-70B-Euryale-v2.1", + "name": "Sao10k: Llama 3 Euryale 70B v2.1", + "created": 1718668800, + "description": "Euryale 70B v2.1 is a model focused on creative roleplay from [Sao10k](https://ko-fi.com/sao10k).\n\n- Better prompt adherence.\n- Better anatomy / spatial awareness.\n- Adapts much better to unique and custom formatting / reply formats.\n- Very creative, lots of unique swipes.\n- Is not restrictive during roleplays.", + "context_length": 8192, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Llama3", + "instruct_type": "llama3" + }, + "pricing": { + "prompt": "0.00000148", + "completion": "0.00000148", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 8192, + "max_completion_tokens": 8192, + 
"is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "max_tokens", + "presence_penalty", + "repetition_penalty", + "seed", + "stop", + "temperature", + "tool_choice", + "tools", + "top_k", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "mistralai/mistral-7b-instruct-v0.3", + "canonical_slug": "mistralai/mistral-7b-instruct-v0.3", + "hugging_face_id": "mistralai/Mistral-7B-Instruct-v0.3", + "name": "Mistral: Mistral 7B Instruct v0.3", + "created": 1716768000, + "description": "A high-performing, industry-standard 7.3B parameter model, with optimizations for speed and context length.\n\nAn improved version of [Mistral 7B Instruct v0.2](/models/mistralai/mistral-7b-instruct-v0.2), with the following changes:\n\n- Extended vocabulary to 32768\n- Supports v3 Tokenizer\n- Supports function calling\n\nNOTE: Support for function calling depends on the provider.", + "context_length": 32768, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Mistral", + "instruct_type": "mistral" + }, + "pricing": { + "prompt": "0.0000002", + "completion": "0.0000002", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 32768, + "max_completion_tokens": 4096, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "logit_bias", + "max_tokens", + "min_p", + "presence_penalty", + "repetition_penalty", + "stop", + "temperature", + "top_k", + "top_p" + ], + "default_parameters": { + "temperature": 0.3 + } + }, + { + "id": "mistralai/mistral-7b-instruct:free", + "canonical_slug": "mistralai/mistral-7b-instruct", + "hugging_face_id": "mistralai/Mistral-7B-Instruct-v0.3", + "name": "Mistral: Mistral 7B Instruct (free)", + "created": 1716768000, + "description": "A high-performing, industry-standard 7.3B parameter model, with optimizations for speed and context length.\n\n*Mistral 7B Instruct has multiple version variants, and this is intended to be the latest version.*", + "context_length": 32768, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Mistral", + "instruct_type": "mistral" + }, + "pricing": { + "prompt": "0", + "completion": "0", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 32768, + "max_completion_tokens": 16384, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "max_tokens", + "min_p", + "presence_penalty", + "repetition_penalty", + "response_format", + "seed", + "stop", + "temperature", + "tool_choice", + "tools", + "top_k", + "top_p" + ], + "default_parameters": { + "temperature": 0.3 + } + }, + { + "id": "mistralai/mistral-7b-instruct", + "canonical_slug": "mistralai/mistral-7b-instruct", + "hugging_face_id": "mistralai/Mistral-7B-Instruct-v0.3", + "name": "Mistral: Mistral 7B Instruct", + "created": 1716768000, + "description": "A high-performing, industry-standard 7.3B parameter model, with optimizations for speed and context length.\n\n*Mistral 7B Instruct has multiple version variants, and this is intended to be the latest version.*", + "context_length": 32768, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": 
"Mistral", + "instruct_type": "mistral" + }, + "pricing": { + "prompt": "0.000000028", + "completion": "0.000000054", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 32768, + "max_completion_tokens": 16384, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "logit_bias", + "max_tokens", + "min_p", + "presence_penalty", + "repetition_penalty", + "response_format", + "seed", + "stop", + "temperature", + "tool_choice", + "tools", + "top_k", + "top_p" + ], + "default_parameters": { + "temperature": 0.3 + } + }, + { + "id": "nousresearch/hermes-2-pro-llama-3-8b", + "canonical_slug": "nousresearch/hermes-2-pro-llama-3-8b", + "hugging_face_id": "NousResearch/Hermes-2-Pro-Llama-3-8B", + "name": "NousResearch: Hermes 2 Pro - Llama-3 8B", + "created": 1716768000, + "description": "Hermes 2 Pro is an upgraded, retrained version of Nous Hermes 2, consisting of an updated and cleaned version of the OpenHermes 2.5 Dataset, as well as a newly introduced Function Calling and JSON Mode dataset developed in-house.", + "context_length": 8192, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Llama3", + "instruct_type": "chatml" + }, + "pricing": { + "prompt": "0.000000025", + "completion": "0.00000008", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 8192, + "max_completion_tokens": 2048, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "max_tokens", + "presence_penalty", + "repetition_penalty", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "top_k", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "microsoft/phi-3-mini-128k-instruct", + "canonical_slug": "microsoft/phi-3-mini-128k-instruct", + "hugging_face_id": "microsoft/Phi-3-mini-128k-instruct", + "name": "Microsoft: Phi-3 Mini 128K Instruct", + "created": 1716681600, + "description": "Phi-3 Mini is a powerful 3.8B parameter model designed for advanced language understanding, reasoning, and instruction following. Optimized through supervised fine-tuning and preference adjustments, it excels in tasks involving common sense, mathematics, logical reasoning, and code processing.\n\nAt time of release, Phi-3 Medium demonstrated state-of-the-art performance among lightweight models. 
This model is static, trained on an offline dataset with an October 2023 cutoff date.", + "context_length": 128000, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Other", + "instruct_type": "phi3" + }, + "pricing": { + "prompt": "0.0000001", + "completion": "0.0000001", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 128000, + "max_completion_tokens": null, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "max_tokens", + "temperature", + "tool_choice", + "tools", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "microsoft/phi-3-medium-128k-instruct", + "canonical_slug": "microsoft/phi-3-medium-128k-instruct", + "hugging_face_id": "microsoft/Phi-3-medium-128k-instruct", + "name": "Microsoft: Phi-3 Medium 128K Instruct", + "created": 1716508800, + "description": "Phi-3 128K Medium is a powerful 14-billion parameter model designed for advanced language understanding, reasoning, and instruction following. Optimized through supervised fine-tuning and preference adjustments, it excels in tasks involving common sense, mathematics, logical reasoning, and code processing.\n\nAt time of release, Phi-3 Medium demonstrated state-of-the-art performance among lightweight models. In the MMLU-Pro eval, the model even comes close to a Llama3 70B level of performance.\n\nFor 4k context length, try [Phi-3 Medium 4K](/models/microsoft/phi-3-medium-4k-instruct).", + "context_length": 128000, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Other", + "instruct_type": "phi3" + }, + "pricing": { + "prompt": "0.000001", + "completion": "0.000001", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 128000, + "max_completion_tokens": null, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "max_tokens", + "temperature", + "tool_choice", + "tools", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "openai/gpt-4o", + "canonical_slug": "openai/gpt-4o", + "hugging_face_id": null, + "name": "OpenAI: GPT-4o", + "created": 1715558400, + "description": "GPT-4o (\"o\" for \"omni\") is OpenAI's latest AI model, supporting both text and image inputs with text outputs. It maintains the intelligence level of [GPT-4 Turbo](/models/openai/gpt-4-turbo) while being twice as fast and 50% more cost-effective. 
GPT-4o also offers improved performance in processing non-English languages and enhanced visual capabilities.\n\nFor benchmarking against other models, it was briefly called [\"im-also-a-good-gpt2-chatbot\"](https://twitter.com/LiamFedus/status/1790064963966370209)\n\n#multimodal", + "context_length": 128000, + "architecture": { + "modality": "text+image-\u003Etext", + "input_modalities": ["text", "image", "file"], + "output_modalities": ["text"], + "tokenizer": "GPT", + "instruct_type": null + }, + "pricing": { + "prompt": "0.0000025", + "completion": "0.00001", + "request": "0", + "image": "0.003613", + "web_search": "0", + "internal_reasoning": "0", + "input_cache_read": "0.00000125" + }, + "top_provider": { + "context_length": 128000, + "max_completion_tokens": 16384, + "is_moderated": true + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "logit_bias", + "logprobs", + "max_tokens", + "presence_penalty", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_logprobs", + "top_p", + "web_search_options" + ], + "default_parameters": {} + }, + { + "id": "openai/gpt-4o:extended", + "canonical_slug": "openai/gpt-4o", + "hugging_face_id": null, + "name": "OpenAI: GPT-4o (extended)", + "created": 1715558400, + "description": "GPT-4o (\"o\" for \"omni\") is OpenAI's latest AI model, supporting both text and image inputs with text outputs. It maintains the intelligence level of [GPT-4 Turbo](/models/openai/gpt-4-turbo) while being twice as fast and 50% more cost-effective. GPT-4o also offers improved performance in processing non-English languages and enhanced visual capabilities.\n\nFor benchmarking against other models, it was briefly called [\"im-also-a-good-gpt2-chatbot\"](https://twitter.com/LiamFedus/status/1790064963966370209)\n\n#multimodal", + "context_length": 128000, + "architecture": { + "modality": "text+image-\u003Etext", + "input_modalities": ["text", "image", "file"], + "output_modalities": ["text"], + "tokenizer": "GPT", + "instruct_type": null + }, + "pricing": { + "prompt": "0.000006", + "completion": "0.000018", + "request": "0", + "image": "0.007225", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 128000, + "max_completion_tokens": 64000, + "is_moderated": true + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "logit_bias", + "logprobs", + "max_tokens", + "presence_penalty", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_logprobs", + "top_p", + "web_search_options" + ], + "default_parameters": {} + }, + { + "id": "openai/gpt-4o-2024-05-13", + "canonical_slug": "openai/gpt-4o-2024-05-13", + "hugging_face_id": null, + "name": "OpenAI: GPT-4o (2024-05-13)", + "created": 1715558400, + "description": "GPT-4o (\"o\" for \"omni\") is OpenAI's latest AI model, supporting both text and image inputs with text outputs. It maintains the intelligence level of [GPT-4 Turbo](/models/openai/gpt-4-turbo) while being twice as fast and 50% more cost-effective. 
GPT-4o also offers improved performance in processing non-English languages and enhanced visual capabilities.\n\nFor benchmarking against other models, it was briefly called [\"im-also-a-good-gpt2-chatbot\"](https://twitter.com/LiamFedus/status/1790064963966370209)\n\n#multimodal", + "context_length": 128000, + "architecture": { + "modality": "text+image-\u003Etext", + "input_modalities": ["text", "image", "file"], + "output_modalities": ["text"], + "tokenizer": "GPT", + "instruct_type": null + }, + "pricing": { + "prompt": "0.000005", + "completion": "0.000015", + "request": "0", + "image": "0.007225", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 128000, + "max_completion_tokens": 4096, + "is_moderated": true + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "logit_bias", + "logprobs", + "max_tokens", + "presence_penalty", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_logprobs", + "top_p", + "web_search_options" + ], + "default_parameters": {} + }, + { + "id": "meta-llama/llama-guard-2-8b", + "canonical_slug": "meta-llama/llama-guard-2-8b", + "hugging_face_id": "meta-llama/Meta-Llama-Guard-2-8B", + "name": "Meta: LlamaGuard 2 8B", + "created": 1715558400, + "description": "This safeguard model has 8B parameters and is based on the Llama 3 family. Just like its predecessor, [LlamaGuard 1](https://huggingface.co/meta-llama/LlamaGuard-7b), it can do both prompt and response classification.\n\nLlamaGuard 2 acts as a normal LLM would, generating text that indicates whether the given input/output is safe/unsafe. If deemed unsafe, it will also share the content categories violated.\n\nFor best results, please use raw prompt input or the `/completions` endpoint, instead of the chat API.\n\nIt has demonstrated strong performance compared to leading closed-source models in human evaluations.\n\nTo read more about the model release, [click here](https://ai.meta.com/blog/meta-llama-3/). Usage of this model is subject to [Meta's Acceptable Use Policy](https://llama.meta.com/llama3/use-policy/).", + "context_length": 8192, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Llama3", + "instruct_type": "none" + }, + "pricing": { + "prompt": "0.0000002", + "completion": "0.0000002", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 8192, + "max_completion_tokens": null, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "logit_bias", + "max_tokens", + "min_p", + "presence_penalty", + "repetition_penalty", + "stop", + "temperature", + "top_k", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "meta-llama/llama-3-8b-instruct", + "canonical_slug": "meta-llama/llama-3-8b-instruct", + "hugging_face_id": "meta-llama/Meta-Llama-3-8B-Instruct", + "name": "Meta: Llama 3 8B Instruct", + "created": 1713398400, + "description": "Meta's latest class of model (Llama 3) launched with a variety of sizes & flavors. This 8B instruct-tuned version was optimized for high quality dialogue usecases.\n\nIt has demonstrated strong performance compared to leading closed-source models in human evaluations.\n\nTo read more about the model release, [click here](https://ai.meta.com/blog/meta-llama-3/). 
Usage of this model is subject to [Meta's Acceptable Use Policy](https://llama.meta.com/llama3/use-policy/).", + "context_length": 8192, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Llama3", + "instruct_type": "llama3" + }, + "pricing": { + "prompt": "0.00000003", + "completion": "0.00000006", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 8192, + "max_completion_tokens": 16384, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "logit_bias", + "max_tokens", + "min_p", + "presence_penalty", + "repetition_penalty", + "response_format", + "seed", + "stop", + "temperature", + "tool_choice", + "tools", + "top_k", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "meta-llama/llama-3-70b-instruct", + "canonical_slug": "meta-llama/llama-3-70b-instruct", + "hugging_face_id": "meta-llama/Meta-Llama-3-70B-Instruct", + "name": "Meta: Llama 3 70B Instruct", + "created": 1713398400, + "description": "Meta's latest class of model (Llama 3) launched with a variety of sizes & flavors. This 70B instruct-tuned version was optimized for high quality dialogue usecases.\n\nIt has demonstrated strong performance compared to leading closed-source models in human evaluations.\n\nTo read more about the model release, [click here](https://ai.meta.com/blog/meta-llama-3/). Usage of this model is subject to [Meta's Acceptable Use Policy](https://llama.meta.com/llama3/use-policy/).", + "context_length": 8192, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Llama3", + "instruct_type": "llama3" + }, + "pricing": { + "prompt": "0.0000003", + "completion": "0.0000004", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 8192, + "max_completion_tokens": 16384, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "logit_bias", + "max_tokens", + "min_p", + "presence_penalty", + "repetition_penalty", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_k", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "mistralai/mixtral-8x22b-instruct", + "canonical_slug": "mistralai/mixtral-8x22b-instruct", + "hugging_face_id": "mistralai/Mixtral-8x22B-Instruct-v0.1", + "name": "Mistral: Mixtral 8x22B Instruct", + "created": 1713312000, + "description": "Mistral's official instruct fine-tuned version of [Mixtral 8x22B](/models/mistralai/mixtral-8x22b). It uses 39B active parameters out of 141B, offering unparalleled cost efficiency for its size. 
Its strengths include:\n- strong math, coding, and reasoning\n- large context length (64k)\n- fluency in English, French, Italian, German, and Spanish\n\nSee benchmarks on the launch announcement [here](https://mistral.ai/news/mixtral-8x22b/).\n#moe", + "context_length": 65536, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Mistral", + "instruct_type": "mistral" + }, + "pricing": { + "prompt": "0.000002", + "completion": "0.000006", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 65536, + "max_completion_tokens": null, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "max_tokens", + "presence_penalty", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_p" + ], + "default_parameters": { + "temperature": 0.3 + } + }, + { + "id": "microsoft/wizardlm-2-8x22b", + "canonical_slug": "microsoft/wizardlm-2-8x22b", + "hugging_face_id": "microsoft/WizardLM-2-8x22B", + "name": "WizardLM-2 8x22B", + "created": 1713225600, + "description": "WizardLM-2 8x22B is Microsoft AI's most advanced Wizard model. It demonstrates highly competitive performance compared to leading proprietary models, and it consistently outperforms all existing state-of-the-art opensource models.\n\nIt is an instruct finetune of [Mixtral 8x22B](/models/mistralai/mixtral-8x22b).\n\nTo read more about the model release, [click here](https://wizardlm.github.io/WizardLM2/).\n\n#moe", + "context_length": 65536, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Mistral", + "instruct_type": "vicuna" + }, + "pricing": { + "prompt": "0.00000048", + "completion": "0.00000048", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 65536, + "max_completion_tokens": 16384, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "max_tokens", + "min_p", + "presence_penalty", + "repetition_penalty", + "response_format", + "seed", + "stop", + "temperature", + "top_k", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "openai/gpt-4-turbo", + "canonical_slug": "openai/gpt-4-turbo", + "hugging_face_id": null, + "name": "OpenAI: GPT-4 Turbo", + "created": 1712620800, + "description": "The latest GPT-4 Turbo model with vision capabilities. 
Vision requests can now use JSON mode and function calling.\n\nTraining data: up to December 2023.", + "context_length": 128000, + "architecture": { + "modality": "text+image-\u003Etext", + "input_modalities": ["text", "image"], + "output_modalities": ["text"], + "tokenizer": "GPT", + "instruct_type": null + }, + "pricing": { + "prompt": "0.00001", + "completion": "0.00003", + "request": "0", + "image": "0.01445", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 128000, + "max_completion_tokens": 4096, + "is_moderated": true + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "logit_bias", + "logprobs", + "max_tokens", + "presence_penalty", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_logprobs", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "anthropic/claude-3-haiku", + "canonical_slug": "anthropic/claude-3-haiku", + "hugging_face_id": null, + "name": "Anthropic: Claude 3 Haiku", + "created": 1710288000, + "description": "Claude 3 Haiku is Anthropic's fastest and most compact model for\nnear-instant responsiveness. Quick and accurate targeted performance.\n\nSee the launch announcement and benchmark results [here](https://www.anthropic.com/news/claude-3-haiku)\n\n#multimodal", + "context_length": 200000, + "architecture": { + "modality": "text+image-\u003Etext", + "input_modalities": ["text", "image"], + "output_modalities": ["text"], + "tokenizer": "Claude", + "instruct_type": null + }, + "pricing": { + "prompt": "0.00000025", + "completion": "0.00000125", + "request": "0", + "image": "0.0004", + "web_search": "0", + "internal_reasoning": "0", + "input_cache_read": "0.00000003", + "input_cache_write": "0.0000003" + }, + "top_provider": { + "context_length": 200000, + "max_completion_tokens": 4096, + "is_moderated": true + }, + "per_request_limits": null, + "supported_parameters": [ + "max_tokens", + "stop", + "temperature", + "tool_choice", + "tools", + "top_k", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "anthropic/claude-3-opus", + "canonical_slug": "anthropic/claude-3-opus", + "hugging_face_id": null, + "name": "Anthropic: Claude 3 Opus", + "created": 1709596800, + "description": "Claude 3 Opus is Anthropic's most powerful model for highly complex tasks. 
It boasts top-level performance, intelligence, fluency, and understanding.\n\nSee the launch announcement and benchmark results [here](https://www.anthropic.com/news/claude-3-family)\n\n#multimodal", + "context_length": 200000, + "architecture": { + "modality": "text+image-\u003Etext", + "input_modalities": ["text", "image"], + "output_modalities": ["text"], + "tokenizer": "Claude", + "instruct_type": null + }, + "pricing": { + "prompt": "0.000015", + "completion": "0.000075", + "request": "0", + "image": "0.024", + "web_search": "0", + "internal_reasoning": "0", + "input_cache_read": "0.0000015", + "input_cache_write": "0.00001875" + }, + "top_provider": { + "context_length": 200000, + "max_completion_tokens": 4096, + "is_moderated": true + }, + "per_request_limits": null, + "supported_parameters": [ + "max_tokens", + "stop", + "temperature", + "tool_choice", + "tools", + "top_k", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "mistralai/mistral-large", + "canonical_slug": "mistralai/mistral-large", + "hugging_face_id": null, + "name": "Mistral Large", + "created": 1708905600, + "description": "This is Mistral AI's flagship model, Mistral Large 2 (version `mistral-large-2407`). It's a proprietary weights-available model and excels at reasoning, code, JSON, chat, and more. Read the launch announcement [here](https://mistral.ai/news/mistral-large-2407/).\n\nIt supports dozens of languages including French, German, Spanish, Italian, Portuguese, Arabic, Hindi, Russian, Chinese, Japanese, and Korean, along with 80+ coding languages including Python, Java, C, C++, JavaScript, and Bash. Its long context window allows precise information recall from large documents.", + "context_length": 128000, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Mistral", + "instruct_type": null + }, + "pricing": { + "prompt": "0.000002", + "completion": "0.000006", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 128000, + "max_completion_tokens": null, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "max_tokens", + "presence_penalty", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_p" + ], + "default_parameters": { + "temperature": 0.3 + } + }, + { + "id": "openai/gpt-4-turbo-preview", + "canonical_slug": "openai/gpt-4-turbo-preview", + "hugging_face_id": null, + "name": "OpenAI: GPT-4 Turbo Preview", + "created": 1706140800, + "description": "The preview GPT-4 model with improved instruction following, JSON mode, reproducible outputs, parallel function calling, and more. 
Training data: up to Dec 2023.\n\n**Note:** heavily rate limited by OpenAI while in preview.", + "context_length": 128000, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "GPT", + "instruct_type": null + }, + "pricing": { + "prompt": "0.00001", + "completion": "0.00003", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 128000, + "max_completion_tokens": 4096, + "is_moderated": true + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "logit_bias", + "logprobs", + "max_tokens", + "presence_penalty", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_logprobs", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "openai/gpt-3.5-turbo-0613", + "canonical_slug": "openai/gpt-3.5-turbo-0613", + "hugging_face_id": null, + "name": "OpenAI: GPT-3.5 Turbo (older v0613)", + "created": 1706140800, + "description": "GPT-3.5 Turbo is OpenAI's fastest model. It can understand and generate natural language or code, and is optimized for chat and traditional completion tasks.\n\nTraining data up to Sep 2021.", + "context_length": 4095, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "GPT", + "instruct_type": null + }, + "pricing": { + "prompt": "0.000001", + "completion": "0.000002", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 4095, + "max_completion_tokens": 4096, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "logit_bias", + "logprobs", + "max_tokens", + "presence_penalty", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_logprobs", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "mistralai/mistral-small", + "canonical_slug": "mistralai/mistral-small", + "hugging_face_id": null, + "name": "Mistral Small", + "created": 1704844800, + "description": "With 22 billion parameters, Mistral Small v24.09 offers a convenient mid-point between [Mistral NeMo 12B](/mistralai/mistral-nemo) and [Mistral Large 2](/mistralai/mistral-large), providing a cost-effective solution that can be deployed across various platforms and environments. 
It has better reasoning, exhibits more capabilities, can produce and reason about code, and is multiligual, supporting English, French, German, Italian, and Spanish.", + "context_length": 32768, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Mistral", + "instruct_type": null + }, + "pricing": { + "prompt": "0.0000002", + "completion": "0.0000006", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 32768, + "max_completion_tokens": null, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "max_tokens", + "presence_penalty", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_p" + ], + "default_parameters": { + "temperature": 0.3 + } + }, + { + "id": "mistralai/mistral-tiny", + "canonical_slug": "mistralai/mistral-tiny", + "hugging_face_id": null, + "name": "Mistral Tiny", + "created": 1704844800, + "description": "Note: This model is being deprecated. Recommended replacement is the newer [Ministral 8B](/mistral/ministral-8b)\n\nThis model is currently powered by Mistral-7B-v0.2, and incorporates a \"better\" fine-tuning than [Mistral 7B](/models/mistralai/mistral-7b-instruct-v0.1), inspired by community work. It's best used for large batch processing tasks where cost is a significant factor but reasoning capabilities are not crucial.", + "context_length": 32768, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Mistral", + "instruct_type": null + }, + "pricing": { + "prompt": "0.00000025", + "completion": "0.00000025", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 32768, + "max_completion_tokens": null, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "max_tokens", + "presence_penalty", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_p" + ], + "default_parameters": { + "temperature": 0.3 + } + }, + { + "id": "mistralai/mistral-7b-instruct-v0.2", + "canonical_slug": "mistralai/mistral-7b-instruct-v0.2", + "hugging_face_id": "mistralai/Mistral-7B-Instruct-v0.2", + "name": "Mistral: Mistral 7B Instruct v0.2", + "created": 1703721600, + "description": "A high-performing, industry-standard 7.3B parameter model, with optimizations for speed and context length.\n\nAn improved version of [Mistral 7B Instruct](/modelsmistralai/mistral-7b-instruct-v0.1), with the following changes:\n\n- 32k context window (vs 8k context in v0.1)\n- Rope-theta = 1e6\n- No Sliding-Window Attention", + "context_length": 32768, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Mistral", + "instruct_type": "mistral" + }, + "pricing": { + "prompt": "0.0000002", + "completion": "0.0000002", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 32768, + "max_completion_tokens": null, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "logit_bias", + "max_tokens", + "min_p", + "presence_penalty", + "repetition_penalty", + "stop", + 
"temperature", + "top_k", + "top_p" + ], + "default_parameters": { + "temperature": 0.3 + } + }, + { + "id": "mistralai/mixtral-8x7b-instruct", + "canonical_slug": "mistralai/mixtral-8x7b-instruct", + "hugging_face_id": "mistralai/Mixtral-8x7B-Instruct-v0.1", + "name": "Mistral: Mixtral 8x7B Instruct", + "created": 1702166400, + "description": "Mixtral 8x7B Instruct is a pretrained generative Sparse Mixture of Experts, by Mistral AI, for chat and instruction use. Incorporates 8 experts (feed-forward networks) for a total of 47 billion parameters.\n\nInstruct model fine-tuned by Mistral. #moe", + "context_length": 32768, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Mistral", + "instruct_type": "mistral" + }, + "pricing": { + "prompt": "0.00000054", + "completion": "0.00000054", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 32768, + "max_completion_tokens": 16384, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "logit_bias", + "max_tokens", + "min_p", + "presence_penalty", + "repetition_penalty", + "response_format", + "seed", + "stop", + "temperature", + "tool_choice", + "tools", + "top_k", + "top_p" + ], + "default_parameters": { + "temperature": 0.3 + } + }, + { + "id": "neversleep/noromaid-20b", + "canonical_slug": "neversleep/noromaid-20b", + "hugging_face_id": "NeverSleep/Noromaid-20b-v0.1.1", + "name": "Noromaid 20B", + "created": 1700956800, + "description": "A collab between IkariDev and Undi. This merge is suitable for RP, ERP, and general knowledge.\n\n#merge #uncensored", + "context_length": 4096, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Llama2", + "instruct_type": "alpaca" + }, + "pricing": { + "prompt": "0.000001", + "completion": "0.00000175", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 4096, + "max_completion_tokens": null, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "logit_bias", + "logprobs", + "max_tokens", + "min_p", + "presence_penalty", + "repetition_penalty", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "top_a", + "top_k", + "top_logprobs", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "alpindale/goliath-120b", + "canonical_slug": "alpindale/goliath-120b", + "hugging_face_id": "alpindale/goliath-120b", + "name": "Goliath 120B", + "created": 1699574400, + "description": "A large LLM created by combining two fine-tuned Llama 70B models into one 120B model. 
Combines Xwin and Euryale.\n\nCredits to\n- [@chargoddard](https://huggingface.co/chargoddard) for developing the framework used to merge the model - [mergekit](https://github.com/cg123/mergekit).\n- [@Undi95](https://huggingface.co/Undi95) for helping with the merge ratios.\n\n#merge", + "context_length": 6144, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Llama2", + "instruct_type": "airoboros" + }, + "pricing": { + "prompt": "0.000006", + "completion": "0.000008", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 6144, + "max_completion_tokens": 1024, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "logit_bias", + "logprobs", + "max_tokens", + "min_p", + "presence_penalty", + "repetition_penalty", + "response_format", + "seed", + "stop", + "temperature", + "top_a", + "top_k", + "top_logprobs", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "openrouter/auto", + "canonical_slug": "openrouter/auto", + "hugging_face_id": null, + "name": "Auto Router", + "created": 1699401600, + "description": "Your prompt will be processed by a meta-model and routed to one of dozens of models (see below), optimizing for the best possible output.\n\nTo see which model was used, visit [Activity](/activity), or read the `model` attribute of the response. Your response will be priced at the same rate as the routed model.\n\nThe meta-model is powered by [Not Diamond](https://docs.notdiamond.ai/docs/how-not-diamond-works). Learn more in our [docs](/docs/model-routing).\n\nRequests will be routed to the following models:\n- [openai/gpt-5](/openai/gpt-5)\n- [openai/gpt-5-mini](/openai/gpt-5-mini)\n- [openai/gpt-5-nano](/openai/gpt-5-nano)\n- [openai/gpt-4.1-nano](/openai/gpt-4.1-nano)\n- [openai/gpt-4.1](/openai/gpt-4.1)\n- [openai/gpt-4.1-mini](/openai/gpt-4.1-mini)\n- [openai/gpt-4o-mini](/openai/gpt-4o-mini)\n- [openai/chatgpt-4o-latest](/openai/chatgpt-4o-latest)\n- [anthropic/claude-3.5-haiku](/anthropic/claude-3.5-haiku)\n- [anthropic/claude-opus-4-1](/anthropic/claude-opus-4-1)\n- [anthropic/claude-sonnet-4-0](/anthropic/claude-sonnet-4-0)\n- [anthropic/claude-3-7-sonnet-latest](/anthropic/claude-3-7-sonnet-latest)\n- [google/gemini-2.5-pro](/google/gemini-2.5-pro)\n- [google/gemini-2.5-flash](/google/gemini-2.5-flash)\n- [mistral/mistral-large-latest](/mistral/mistral-large-latest)\n- [mistral/mistral-medium-latest](/mistral/mistral-medium-latest)\n- [mistral/mistral-small-latest](/mistral/mistral-small-latest)\n- [mistralai/mistral-nemo](/mistralai/mistral-nemo)\n- [x-ai/grok-3](/x-ai/grok-3)\n- [x-ai/grok-3-mini](/x-ai/grok-3-mini)\n- [x-ai/grok-4](/x-ai/grok-4)\n- [deepseek/deepseek-r1](/deepseek/deepseek-r1)\n- [meta-llama/llama-3.1-70b-instruct](/meta-llama/llama-3.1-70b-instruct)\n- [meta-llama/llama-3.1-405b-instruct](/meta-llama/llama-3.1-405b-instruct)\n- [mistralai/mixtral-8x22b-instruct](/mistralai/mixtral-8x22b-instruct)\n- [perplexity/sonar](/perplexity/sonar)\n- [cohere/command-r-plus](/cohere/command-r-plus)\n- [cohere/command-r](/cohere/command-r)", + "context_length": 2000000, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Router", + "instruct_type": null + }, + "pricing": { + "prompt": "-1", + "completion": "-1" + }, + 
"top_provider": { + "context_length": null, + "max_completion_tokens": null, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [], + "default_parameters": {} + }, + { + "id": "openai/gpt-4-1106-preview", + "canonical_slug": "openai/gpt-4-1106-preview", + "hugging_face_id": null, + "name": "OpenAI: GPT-4 Turbo (older v1106)", + "created": 1699228800, + "description": "The latest GPT-4 Turbo model with vision capabilities. Vision requests can now use JSON mode and function calling.\n\nTraining data: up to April 2023.", + "context_length": 128000, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "GPT", + "instruct_type": null + }, + "pricing": { + "prompt": "0.00001", + "completion": "0.00003", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 128000, + "max_completion_tokens": 4096, + "is_moderated": true + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "logit_bias", + "logprobs", + "max_tokens", + "presence_penalty", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_logprobs", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "mistralai/mistral-7b-instruct-v0.1", + "canonical_slug": "mistralai/mistral-7b-instruct-v0.1", + "hugging_face_id": "mistralai/Mistral-7B-Instruct-v0.1", + "name": "Mistral: Mistral 7B Instruct v0.1", + "created": 1695859200, + "description": "A 7.3B parameter model that outperforms Llama 2 13B on all benchmarks, with optimizations for speed and context length.", + "context_length": 2824, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Mistral", + "instruct_type": "mistral" + }, + "pricing": { + "prompt": "0.00000011", + "completion": "0.00000019", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 2824, + "max_completion_tokens": null, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "logit_bias", + "max_tokens", + "min_p", + "presence_penalty", + "repetition_penalty", + "seed", + "stop", + "temperature", + "tool_choice", + "tools", + "top_k", + "top_p" + ], + "default_parameters": { + "temperature": 0.3 + } + }, + { + "id": "openai/gpt-3.5-turbo-instruct", + "canonical_slug": "openai/gpt-3.5-turbo-instruct", + "hugging_face_id": null, + "name": "OpenAI: GPT-3.5 Turbo Instruct", + "created": 1695859200, + "description": "This model is a variant of GPT-3.5 Turbo tuned for instructional prompts and omitting chat-related optimizations. 
Training data: up to Sep 2021.", + "context_length": 4095, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "GPT", + "instruct_type": "chatml" + }, + "pricing": { + "prompt": "0.0000015", + "completion": "0.000002", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 4095, + "max_completion_tokens": 4096, + "is_moderated": true + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "logit_bias", + "logprobs", + "max_tokens", + "presence_penalty", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "top_logprobs", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "openai/gpt-3.5-turbo-16k", + "canonical_slug": "openai/gpt-3.5-turbo-16k", + "hugging_face_id": null, + "name": "OpenAI: GPT-3.5 Turbo 16k", + "created": 1693180800, + "description": "This model offers four times the context length of gpt-3.5-turbo, allowing it to support approximately 20 pages of text in a single request at a higher cost. Training data: up to Sep 2021.", + "context_length": 16385, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "GPT", + "instruct_type": null + }, + "pricing": { + "prompt": "0.000003", + "completion": "0.000004", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 16385, + "max_completion_tokens": 4096, + "is_moderated": true + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "logit_bias", + "logprobs", + "max_tokens", + "presence_penalty", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_logprobs", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "mancer/weaver", + "canonical_slug": "mancer/weaver", + "hugging_face_id": null, + "name": "Mancer: Weaver (alpha)", + "created": 1690934400, + "description": "An attempt to recreate Claude-style verbosity, but don't expect the same level of coherence or memory. Meant for use in roleplay/narrative situations.", + "context_length": 8000, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Llama2", + "instruct_type": "alpaca" + }, + "pricing": { + "prompt": "0.000001125", + "completion": "0.000001125", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 8000, + "max_completion_tokens": 2000, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "logit_bias", + "logprobs", + "max_tokens", + "min_p", + "presence_penalty", + "repetition_penalty", + "response_format", + "seed", + "stop", + "temperature", + "top_a", + "top_k", + "top_logprobs", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "undi95/remm-slerp-l2-13b", + "canonical_slug": "undi95/remm-slerp-l2-13b", + "hugging_face_id": "Undi95/ReMM-SLERP-L2-13B", + "name": "ReMM SLERP 13B", + "created": 1689984000, + "description": "A recreation trial of the original MythoMax-L2-B13 but with updated models. 
#merge", + "context_length": 6144, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Llama2", + "instruct_type": "alpaca" + }, + "pricing": { + "prompt": "0.00000045", + "completion": "0.00000065", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 6144, + "max_completion_tokens": null, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "logit_bias", + "logprobs", + "max_tokens", + "min_p", + "presence_penalty", + "repetition_penalty", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "top_a", + "top_k", + "top_logprobs", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "gryphe/mythomax-l2-13b", + "canonical_slug": "gryphe/mythomax-l2-13b", + "hugging_face_id": "Gryphe/MythoMax-L2-13b", + "name": "MythoMax 13B", + "created": 1688256000, + "description": "One of the highest performing and most popular fine-tunes of Llama 2 13B, with rich descriptions and roleplay. #merge", + "context_length": 4096, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "Llama2", + "instruct_type": "alpaca" + }, + "pricing": { + "prompt": "0.00000006", + "completion": "0.00000006", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 4096, + "max_completion_tokens": null, + "is_moderated": false + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "logit_bias", + "logprobs", + "max_tokens", + "min_p", + "presence_penalty", + "repetition_penalty", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "top_a", + "top_k", + "top_logprobs", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "openai/gpt-4-0314", + "canonical_slug": "openai/gpt-4-0314", + "hugging_face_id": null, + "name": "OpenAI: GPT-4 (older v0314)", + "created": 1685232000, + "description": "GPT-4-0314 is the first version of GPT-4 released, with a context length of 8,192 tokens, and was supported until June 14. Training data: up to Sep 2021.", + "context_length": 8191, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "GPT", + "instruct_type": null + }, + "pricing": { + "prompt": "0.00003", + "completion": "0.00006", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 8191, + "max_completion_tokens": 4096, + "is_moderated": true + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "logit_bias", + "logprobs", + "max_tokens", + "presence_penalty", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_logprobs", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "openai/gpt-4", + "canonical_slug": "openai/gpt-4", + "hugging_face_id": null, + "name": "OpenAI: GPT-4", + "created": 1685232000, + "description": "OpenAI's flagship model, GPT-4 is a large-scale multimodal language model capable of solving difficult problems with greater accuracy than previous models due to its broader general knowledge and advanced reasoning capabilities. 
Training data: up to Sep 2021.", + "context_length": 8191, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "GPT", + "instruct_type": null + }, + "pricing": { + "prompt": "0.00003", + "completion": "0.00006", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 8191, + "max_completion_tokens": 4096, + "is_moderated": true + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "logit_bias", + "logprobs", + "max_tokens", + "presence_penalty", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_logprobs", + "top_p" + ], + "default_parameters": {} + }, + { + "id": "openai/gpt-3.5-turbo", + "canonical_slug": "openai/gpt-3.5-turbo", + "hugging_face_id": null, + "name": "OpenAI: GPT-3.5 Turbo", + "created": 1685232000, + "description": "GPT-3.5 Turbo is OpenAI's fastest model. It can understand and generate natural language or code, and is optimized for chat and traditional completion tasks.\n\nTraining data up to Sep 2021.", + "context_length": 16385, + "architecture": { + "modality": "text-\u003Etext", + "input_modalities": ["text"], + "output_modalities": ["text"], + "tokenizer": "GPT", + "instruct_type": null + }, + "pricing": { + "prompt": "0.0000005", + "completion": "0.0000015", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 16385, + "max_completion_tokens": 4096, + "is_moderated": true + }, + "per_request_limits": null, + "supported_parameters": [ + "frequency_penalty", + "logit_bias", + "logprobs", + "max_tokens", + "presence_penalty", + "response_format", + "seed", + "stop", + "structured_outputs", + "temperature", + "tool_choice", + "tools", + "top_logprobs", + "top_p" + ], + "default_parameters": {} + } + ] +} diff --git a/gui/src/pages/AddNewModel/configs/providers.ts b/gui/src/pages/AddNewModel/configs/providers.ts index 1bab7abeb81..4d2078fb24a 100644 --- a/gui/src/pages/AddNewModel/configs/providers.ts +++ b/gui/src/pages/AddNewModel/configs/providers.ts @@ -3,6 +3,7 @@ import { ModelProviderTags } from "../../../components/modelSelection/utils"; import { completionParamsInputs } from "./completionParamsInputs"; import type { ModelPackage } from "./models"; import { models } from "./models"; +import { openRouterModelsList } from "./openRouterModel"; export interface InputDescriptor { inputType: HTMLInputTypeAttribute; @@ -170,6 +171,29 @@ export const providers: Partial> = { packages: [models.claude4Sonnet, models.claude41Opus, models.claude35Haiku], apiKeyUrl: "https://console.anthropic.com/account/keys", }, + openrouter: { + title: "OpenRouter", + provider: "openrouter", + description: + "OpenRouter provides access to a variety of LLMs including open-source and proprietary models.", + longDescription: `To get started with OpenRouter, sign up for an account at [openrouter.ai](https://openrouter.ai/) and obtain your API key from the dashboard.`, + icon: "openrouter.png", + tags: [ModelProviderTags.RequiresApiKey], + refPage: "openrouter", + apiKeyUrl: "https://openrouter.ai/settings/keys", + collectInputFor: [ + { + inputType: "text", + key: "apiKey", + label: "API Key", + placeholder: "Enter your OpenRouter API key", + required: true, + }, + ...completionParamsInputsConfigs, + ], + packages: openRouterModelsList, + }, + moonshot: { title: 
"Moonshot", provider: "moonshot", From a89daa67aedb1e5bfbf63bb275d29382e462c1e6 Mon Sep 17 00:00:00 2001 From: Parthasarathy Date: Sun, 16 Nov 2025 01:43:01 +0530 Subject: [PATCH 2/8] feat: add search functionality to model selection listbox --- .../modelSelection/ModelSelectionListbox.tsx | 262 ++++++++++++------ gui/src/forms/AddModelForm.tsx | 1 + 2 files changed, 184 insertions(+), 79 deletions(-) diff --git a/gui/src/components/modelSelection/ModelSelectionListbox.tsx b/gui/src/components/modelSelection/ModelSelectionListbox.tsx index 04510b6bb33..a7bf463a987 100644 --- a/gui/src/components/modelSelection/ModelSelectionListbox.tsx +++ b/gui/src/components/modelSelection/ModelSelectionListbox.tsx @@ -2,8 +2,9 @@ import { CheckIcon, ChevronUpDownIcon, CubeIcon, + MagnifyingGlassIcon, } from "@heroicons/react/24/outline"; -import { Fragment } from "react"; +import { Fragment, useEffect, useMemo, useState } from "react"; import { Listbox, ListboxButton, @@ -19,6 +20,34 @@ interface ModelSelectionListboxProps { setSelectedProvider: (val: DisplayInfo) => void; topOptions?: DisplayInfo[]; otherOptions?: DisplayInfo[]; + searchPlaceholder?: string; +} + +/** + * Simple fuzzy search algorithm + * Returns a score based on how well the query matches the text + */ +function fuzzyScore(query: string, text: string): number { + const q = query.toLowerCase(); + const t = text.toLowerCase(); + + if (!q) return 1; // Empty query matches everything + if (!t) return 0; + + let score = 0; + let queryIdx = 0; + let lastMatchIdx = -1; + + for (let i = 0; i < t.length && queryIdx < q.length; i++) { + if (t[i] === q[queryIdx]) { + score += 1 + (lastMatchIdx === i - 1 ? 5 : 0); // Bonus for consecutive matches + lastMatchIdx = i; + queryIdx++; + } + } + + // Return 0 if not all query characters were found + return queryIdx === q.length ? score / t.length : 0; } function ModelSelectionListbox({ @@ -26,9 +55,51 @@ function ModelSelectionListbox({ setSelectedProvider, topOptions = [], otherOptions = [], + searchPlaceholder = "Search models...", }: ModelSelectionListboxProps) { + const [searchQuery, setSearchQuery] = useState(""); + + // Clear search query when provider changes + useEffect(() => { + setSearchQuery(""); + }, [selectedProvider]); + + // Combine and filter options based on fuzzy search + const filteredTopOptions = useMemo(() => { + if (!searchQuery) return topOptions; + return topOptions + .map((opt) => ({ + option: opt, + score: fuzzyScore(searchQuery, opt.title), + })) + .filter(({ score }) => score > 0) + .sort((a, b) => b.score - a.score) + .map(({ option }) => option); + }, [searchQuery, topOptions]); + + const filteredOtherOptions = useMemo(() => { + if (!searchQuery) return otherOptions; + return otherOptions + .map((opt) => ({ + option: opt, + score: fuzzyScore(searchQuery, opt.title), + })) + .filter(({ score }) => score > 0) + .sort((a, b) => b.score - a.score) + .map(({ option }) => option); + }, [searchQuery, otherOptions]); + + const hasResults = + filteredTopOptions.length > 0 || filteredOtherOptions.length > 0; + return ( - + { + setSelectedProvider(value); + setSearchQuery(""); + }} + >
@@ -54,87 +125,120 @@ function ModelSelectionListbox({ leaveFrom="opacity-100" leaveTo="opacity-0" > - - {topOptions.length > 0 && ( -
-
- Popular -
- {topOptions.map((option, index) => ( - - ` ${selected ? "bg-list-active" : "bg-input"} hover:bg-list-active hover:text-list-active-foreground relative flex cursor-default cursor-pointer select-none items-center justify-between gap-2 p-1.5 px-3 py-2 pr-4` - } - value={option} - > - {({ selected }) => ( - <> -
- {option.title === "Autodetect" ? ( - - ) : ( - window.vscMediaUrl && - option.icon && ( - - ) - )} - {option.title} -
- {selected && ( -
- ))} + + {/* Search Box */} +
+
+ + setSearchQuery(e.target.value)} + className="bg-background text-foreground placeholder-description-muted w-full border-0 px-2 py-1.5 outline-none" + onClick={(e) => e.stopPropagation()} + />
- )} - {topOptions.length > 0 && otherOptions.length > 0 && ( -
- )} - {otherOptions.length > 0 && ( -
-
- Additional providers +
+ + {/* Results */} +
+ {!hasResults ? ( +
+ No models found matching "{searchQuery}"
- {otherOptions.map((option, index) => ( - - ` ${selected ? "bg-list-active" : "bg-input"} hover:bg-list-active hover:text-list-active-foreground relative flex cursor-default cursor-pointer select-none items-center justify-between gap-2 p-1.5 px-3 py-2 pr-4` - } - value={option} - > - {({ selected }) => ( - <> -
- {option.title === "Autodetect" ? ( - - ) : ( - window.vscMediaUrl && - option.icon && ( - - ) + ) : ( + <> + {filteredTopOptions.length > 0 && ( +
+
+ Popular +
+ {filteredTopOptions.map((option, index) => ( + + ` ${selected ? "bg-list-active" : "bg-input"} hover:bg-list-active hover:text-list-active-foreground relative flex cursor-default select-none items-center justify-between gap-2 p-1.5 px-3 py-2 pr-4` + } + value={option} + > + {({ selected }) => ( + <> +
+ {option.title === "Autodetect" ? ( + + ) : ( + window.vscMediaUrl && + option.icon && ( + + ) + )} + {option.title} +
+ {selected && ( +
- - {selected && ( -
+ )} + {filteredTopOptions.length > 0 && + filteredOtherOptions.length > 0 && ( +
)} - - ))} -
- )} + {filteredOtherOptions.length > 0 && ( +
+
+ Additional providers +
+ {filteredOtherOptions.map((option, index) => ( + + ` ${selected ? "bg-list-active" : "bg-input"} hover:bg-list-active hover:text-list-active-foreground relative flex cursor-pointer select-none items-center justify-between gap-2 p-1.5 px-3 py-2 pr-4` + } + value={option} + > + {({ selected }) => ( + <> +
+ {option.title === "Autodetect" ? ( + + ) : ( + window.vscMediaUrl && + option.icon && ( + + ) + )} + {option.title} +
+ + {selected && ( +
+ ))} +
+ )} + + )} +
diff --git a/gui/src/forms/AddModelForm.tsx b/gui/src/forms/AddModelForm.tsx index b83d2a8945a..3d24816c4d0 100644 --- a/gui/src/forms/AddModelForm.tsx +++ b/gui/src/forms/AddModelForm.tsx @@ -150,6 +150,7 @@ export function AddModelForm({ }} topOptions={popularProviders} otherOptions={otherProviders} + searchPlaceholder="Search providers..." /> Don't see your provider?{" "} From 46619a75cf4d0975927c3c42d99aec085d9625f2 Mon Sep 17 00:00:00 2001 From: partha-sarathyy Date: Sun, 16 Nov 2025 02:20:27 +0530 Subject: [PATCH 3/8] Update gui/src/components/modelSelection/ModelSelectionListbox.tsx ai suggestion Co-authored-by: cubic-dev-ai[bot] <191113872+cubic-dev-ai[bot]@users.noreply.github.com> --- gui/src/components/modelSelection/ModelSelectionListbox.tsx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gui/src/components/modelSelection/ModelSelectionListbox.tsx b/gui/src/components/modelSelection/ModelSelectionListbox.tsx index a7bf463a987..87ba2822c56 100644 --- a/gui/src/components/modelSelection/ModelSelectionListbox.tsx +++ b/gui/src/components/modelSelection/ModelSelectionListbox.tsx @@ -158,7 +158,7 @@ function ModelSelectionListbox({ - ` ${selected ? "bg-list-active" : "bg-input"} hover:bg-list-active hover:text-list-active-foreground relative flex cursor-default select-none items-center justify-between gap-2 p-1.5 px-3 py-2 pr-4` + ` ${selected ? "bg-list-active" : "bg-input"} hover:bg-list-active hover:text-list-active-foreground relative flex cursor-pointer select-none items-center justify-between gap-2 p-1.5 px-3 py-2 pr-4` } value={option} > From 564e0b06f32525a6fd30d291d29e6fc9d87678d6 Mon Sep 17 00:00:00 2001 From: Parthasarathy Date: Sun, 16 Nov 2025 02:23:41 +0530 Subject: [PATCH 4/8] fix: update isOpenSource determination based on hugging_face_id --- gui/src/pages/AddNewModel/configs/openRouterModel.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gui/src/pages/AddNewModel/configs/openRouterModel.ts b/gui/src/pages/AddNewModel/configs/openRouterModel.ts index 10ee5aa3d65..07756d1eb1b 100644 --- a/gui/src/pages/AddNewModel/configs/openRouterModel.ts +++ b/gui/src/pages/AddNewModel/configs/openRouterModel.ts @@ -57,7 +57,7 @@ function convertOpenRouterModelToPackage(model: OpenRouterModel): ModelPackage { model: model.id, contextLength, }, - isOpenSource: model.architecture?.modality ? 
false : true, + isOpenSource: !!model.hugging_face_id, tags: [provider as any], }; } From cc1c013934816a9048d8c1f9a931b95e967e6bba Mon Sep 17 00:00:00 2001 From: Parthasarathy Date: Sun, 16 Nov 2025 20:41:24 +0530 Subject: [PATCH 5/8] Trigger CI/CD From 84dd6bac8f733c48b3f2848d1885bb6b66f6a71a Mon Sep 17 00:00:00 2001 From: Parthasarathy Date: Wed, 19 Nov 2025 20:13:40 +0530 Subject: [PATCH 6/8] refactor: propery cleanup openrouter --- .../AddNewModel/configs/openRouterModel.ts | 39 +- .../AddNewModel/configs/openRouterModels.json | 14681 +--------------- 2 files changed, 678 insertions(+), 14042 deletions(-) diff --git a/gui/src/pages/AddNewModel/configs/openRouterModel.ts b/gui/src/pages/AddNewModel/configs/openRouterModel.ts index 07756d1eb1b..2c5ba5b4ac2 100644 --- a/gui/src/pages/AddNewModel/configs/openRouterModel.ts +++ b/gui/src/pages/AddNewModel/configs/openRouterModel.ts @@ -3,32 +3,10 @@ import openRouterModelsData from "./openRouterModels.json"; interface OpenRouterModel { id: string; - canonical_slug: string; - hugging_face_id: string; name: string; - created: number; description: string; context_length: number; - architecture: { - modality: string; - instruct_type: string | null; - [key: string]: any; - }; - pricing: { - prompt: string; - completion: string; - request?: string; - image?: string; - [key: string]: any; - }; - top_provider: { - max_completion_tokens?: number; - is_moderated: boolean; - [key: string]: any; - }; - per_request_limits: null | { [key: string]: any }; - supported_parameters: string[]; - default_parameters: null | { [key: string]: any }; + hugging_face_id: string; } /** @@ -38,24 +16,13 @@ function convertOpenRouterModelToPackage(model: OpenRouterModel): ModelPackage { // Extract provider name from id (e.g., "openai/gpt-5.1" -> "openai") const [provider] = model.id.split("/"); - // Create a friendly title from the name - const title = model.name; - - // Extract context length - const contextLength = model.context_length; - - // Get pricing info for display - const pricingInfo = model.pricing - ? `Prompt: $${model.pricing.prompt}/1K tokens, Completion: $${model.pricing.completion}/1K tokens` - : "Pricing not available"; - return { - title, + title: model.name, description: model.description, refUrl: `https://openrouter.ai/models/${model.id}`, params: { model: model.id, - contextLength, + contextLength: model.context_length, }, isOpenSource: !!model.hugging_face_id, tags: [provider as any], diff --git a/gui/src/pages/AddNewModel/configs/openRouterModels.json b/gui/src/pages/AddNewModel/configs/openRouterModels.json index 14662eb2db4..e4c6fe0e63e 100644 --- a/gui/src/pages/AddNewModel/configs/openRouterModels.json +++ b/gui/src/pages/AddNewModel/configs/openRouterModels.json @@ -2,15742 +2,2411 @@ "data": [ { "id": "openai/gpt-5.1", - "canonical_slug": "openai/gpt-5.1-20251113", - "hugging_face_id": "", "name": "OpenAI: GPT-5.1", - "created": 1763060305, - "description": "GPT-5.1 is the latest frontier-grade model in the GPT-5 series, offering stronger general-purpose reasoning, improved instruction adherence, and a more natural conversational style compared to GPT-5. It uses adaptive reasoning to allocate computation dynamically, responding quickly to simple queries while spending more depth on complex tasks. 
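For reference, a minimal sketch of what the trimmed converter above produces for one of the entries in this file (the input literal is abridged from the data below; the `ModelPackage` fields follow the shape shown in the diff):

```ts
// Abridged input, matching the trimmed OpenRouterModel interface above.
const model = {
  id: "moonshotai/kimi-k2-thinking",
  name: "MoonshotAI: Kimi K2 Thinking",
  description: "Kimi K2 Thinking is Moonshot AI's most advanced open reasoning model to date...",
  context_length: 262144,
  hugging_face_id: "moonshotai/Kimi-K2-Thinking",
};

convertOpenRouterModelToPackage(model);
// => {
//   title: "MoonshotAI: Kimi K2 Thinking",
//   description: "Kimi K2 Thinking is Moonshot AI's most advanced open reasoning model to date...",
//   refUrl: "https://openrouter.ai/models/moonshotai/kimi-k2-thinking",
//   params: { model: "moonshotai/kimi-k2-thinking", contextLength: 262144 },
//   isOpenSource: true,   // non-empty hugging_face_id; "" (e.g. GPT-5.1) or null coerces to false
//   tags: ["moonshotai"], // first segment of model.id.split("/")
// }
```

The `hugging_face_id` check is why PATCH 4 replaced the modality-based heuristic: the presence of a Hugging Face repo is a more direct signal that a model's weights are open.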
The model produces clearer, more grounded explanations with reduced jargon, making it easier to follow even on technical or multi-step problems.\n\nBuilt for broad task coverage, GPT-5.1 delivers consistent gains across math, coding, and structured analysis workloads, with more coherent long-form answers and improved tool-use reliability. It also features refined conversational alignment, enabling warmer, more intuitive responses without compromising precision. GPT-5.1 serves as the primary full-capability successor to GPT-5", + "description": "GPT-5.1 is the latest frontier-grade model in the GPT-5 series, offering stronger general-purpose reasoning, improved instruction adherence, and a more natural conversational style compared to GPT-5.", "context_length": 400000, - "architecture": { - "modality": "text+image-\u003Etext", - "input_modalities": ["image", "text", "file"], - "output_modalities": ["text"], - "tokenizer": "GPT", - "instruct_type": null - }, - "pricing": { - "prompt": "0.00000125", - "completion": "0.00001", - "request": "0", - "image": "0", - "web_search": "0.01", - "internal_reasoning": "0", - "input_cache_read": "0.000000125" - }, - "top_provider": { - "context_length": 400000, - "max_completion_tokens": 128000, - "is_moderated": true - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "include_reasoning", - "logit_bias", - "logprobs", - "max_tokens", - "presence_penalty", - "reasoning", - "response_format", - "seed", - "stop", - "structured_outputs", - "tool_choice", - "tools", - "top_logprobs" - ], - "default_parameters": { - "temperature": null, - "top_p": null, - "frequency_penalty": null - } + "hugging_face_id": "" }, { "id": "openai/gpt-5.1-chat", - "canonical_slug": "openai/gpt-5.1-chat-20251113", - "hugging_face_id": "", "name": "OpenAI: GPT-5.1 Chat", - "created": 1763060302, - "description": "GPT-5.1 Chat (AKA Instant is the fast, lightweight member of the 5.1 family, optimized for low-latency chat while retaining strong general intelligence. It uses adaptive reasoning to selectively “think” on harder queries, improving accuracy on math, coding, and multi-step tasks without slowing down typical conversations. The model is warmer and more conversational by default, with better instruction following and more stable short-form reasoning. 
GPT-5.1 Chat is designed for high-throughput, interactive workloads where responsiveness and consistency matter more than deep deliberation.\n", + "description": "GPT-5.1 Chat (AKA Instant is the fast, lightweight member of the 5.1 family, optimized for low-latency chat while retaining strong general intelligence.", "context_length": 128000, - "architecture": { - "modality": "text+image-\u003Etext", - "input_modalities": ["file", "image", "text"], - "output_modalities": ["text"], - "tokenizer": "GPT", - "instruct_type": null - }, - "pricing": { - "prompt": "0.00000125", - "completion": "0.00001", - "request": "0", - "image": "0", - "web_search": "0.01", - "internal_reasoning": "0", - "input_cache_read": "0.000000125" - }, - "top_provider": { - "context_length": 128000, - "max_completion_tokens": 16384, - "is_moderated": true - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "logit_bias", - "logprobs", - "max_tokens", - "presence_penalty", - "response_format", - "seed", - "stop", - "structured_outputs", - "top_logprobs" - ], - "default_parameters": { - "temperature": null, - "top_p": null, - "frequency_penalty": null - } + "hugging_face_id": "" }, { "id": "openai/gpt-5.1-codex", - "canonical_slug": "openai/gpt-5.1-codex-20251113", - "hugging_face_id": "", "name": "OpenAI: GPT-5.1-Codex", - "created": 1763060298, - "description": "GPT-5.1-Codex is a specialized version of GPT-5.1 optimized for software engineering and coding workflows. It is designed for both interactive development sessions and long, independent execution of complex engineering tasks. The model supports building projects from scratch, feature development, debugging, large-scale refactoring, and code review. Compared to GPT-5.1, Codex is more steerable, adheres closely to developer instructions, and produces cleaner, higher-quality code outputs. Reasoning effort can be adjusted with the `reasoning.effort` parameter. Read the [docs here](https://openrouter.ai/docs/use-cases/reasoning-tokens#reasoning-effort-level)\n\nCodex integrates into developer environments including the CLI, IDE extensions, GitHub, and cloud tasks. It adapts reasoning effort dynamically—providing fast responses for small tasks while sustaining extended multi-hour runs for large projects. The model is trained to perform structured code reviews, catching critical flaws by reasoning over dependencies and validating behavior against tests. It also supports multimodal inputs such as images or screenshots for UI development and integrates tool use for search, dependency installation, and environment setup. 
Codex is intended specifically for agentic coding applications.", + "description": "GPT-5.1-Codex is a specialized version of GPT-5.1 optimized for software engineering and coding workflows.", "context_length": 400000, - "architecture": { - "modality": "text+image-\u003Etext", - "input_modalities": ["text", "image"], - "output_modalities": ["text"], - "tokenizer": "GPT", - "instruct_type": null - }, - "pricing": { - "prompt": "0.00000125", - "completion": "0.00001", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0", - "input_cache_read": "0.000000125" - }, - "top_provider": { - "context_length": 400000, - "max_completion_tokens": 128000, - "is_moderated": true - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "include_reasoning", - "logit_bias", - "logprobs", - "max_tokens", - "presence_penalty", - "reasoning", - "response_format", - "seed", - "stop", - "structured_outputs", - "tool_choice", - "tools", - "top_logprobs" - ], - "default_parameters": { - "temperature": null, - "top_p": null, - "frequency_penalty": null - } + "hugging_face_id": "" }, { "id": "openai/gpt-5.1-codex-mini", - "canonical_slug": "openai/gpt-5.1-codex-mini-20251113", - "hugging_face_id": "", "name": "OpenAI: GPT-5.1-Codex-Mini", - "created": 1763057820, - "description": "GPT-5.1-Codex-Mini is a smaller and faster version of GPT-5.1-Codex", + "description": "GPT-5.1-Codex-Mini is a smaller and faster version of GPT-5.1-Codex.", "context_length": 400000, - "architecture": { - "modality": "text+image-\u003Etext", - "input_modalities": ["image", "text"], - "output_modalities": ["text"], - "tokenizer": "GPT", - "instruct_type": null - }, - "pricing": { - "prompt": "0.00000025", - "completion": "0.000002", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0", - "input_cache_read": "0.000000025" - }, - "top_provider": { - "context_length": 400000, - "max_completion_tokens": 100000, - "is_moderated": true - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "include_reasoning", - "logit_bias", - "logprobs", - "max_tokens", - "presence_penalty", - "reasoning", - "response_format", - "seed", - "stop", - "structured_outputs", - "tool_choice", - "tools", - "top_logprobs" - ], - "default_parameters": { - "temperature": null, - "top_p": null, - "frequency_penalty": null - } + "hugging_face_id": "" }, { "id": "kwaipilot/kat-coder-pro:free", - "canonical_slug": "kwaipilot/kat-coder-pro-v1", - "hugging_face_id": "", "name": "Kwaipilot: KAT-Coder-Pro V1 (free)", - "created": 1762745912, - "description": "KAT-Coder-Pro V1 is KwaiKAT's most advanced agentic coding model in the KAT-Coder series. Designed specifically for agentic coding tasks, it excels in real-world software engineering scenarios, achieving 73.4% solve rate on the SWE-Bench Verified benchmark. 
\n\nThe model has been optimized for tool-use capability, multi-turn interaction, instruction following, generalization, and comprehensive capabilities through a multi-stage training process, including mid-training, supervised fine-tuning (SFT), reinforcement fine-tuning (RFT), and scalable agentic RL.", + "description": "KAT-Coder-Pro V1 is KwaiKAT's most advanced agentic coding model in the KAT-Coder series.", "context_length": 256000, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Other", - "instruct_type": null - }, - "pricing": { - "prompt": "0", - "completion": "0", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 256000, - "max_completion_tokens": 32000, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "max_tokens", - "presence_penalty", - "repetition_penalty", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_k", - "top_p" - ], - "default_parameters": { - "temperature": null, - "top_p": null, - "frequency_penalty": null - } + "hugging_face_id": "" }, { "id": "moonshotai/kimi-linear-48b-a3b-instruct", - "canonical_slug": "moonshotai/kimi-linear-48b-a3b-instruct-20251029", - "hugging_face_id": "moonshotai/Kimi-Linear-48B-A3B-Instruct", "name": "MoonshotAI: Kimi Linear 48B A3B Instruct", - "created": 1762565833, - "description": "Kimi Linear is a hybrid linear attention architecture that outperforms traditional full attention methods across various contexts, including short, long, and reinforcement learning (RL) scaling regimes. At its core is Kimi Delta Attention (KDA)—a refined version of Gated DeltaNet that introduces a more efficient gating mechanism to optimize the use of finite-state RNN memory.\n\nKimi Linear achieves superior performance and hardware efficiency, especially for long-context tasks. 
It reduces the need for large KV caches by up to 75% and boosts decoding throughput by up to 6x for contexts as long as 1M tokens.", + "description": "Kimi Linear is a hybrid linear attention architecture that outperforms traditional full attention methods across various contexts, including short, long, and reinforcement learning (RL) scaling.", "context_length": 1048576, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Other", - "instruct_type": null - }, - "pricing": { - "prompt": "0.0000003", - "completion": "0.0000006", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 1048576, - "max_completion_tokens": 1048576, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "logit_bias", - "logprobs", - "max_tokens", - "min_p", - "presence_penalty", - "repetition_penalty", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "top_k", - "top_logprobs", - "top_p" - ], - "default_parameters": { - "temperature": null, - "top_p": null, - "frequency_penalty": null - } + "hugging_face_id": "moonshotai/Kimi-Linear-48B-A3B-Instruct" }, { "id": "moonshotai/kimi-k2-thinking", - "canonical_slug": "moonshotai/kimi-k2-thinking-20251106", - "hugging_face_id": "moonshotai/Kimi-K2-Thinking", "name": "MoonshotAI: Kimi K2 Thinking", - "created": 1762440622, - "description": "Kimi K2 Thinking is Moonshot AI’s most advanced open reasoning model to date, extending the K2 series into agentic, long-horizon reasoning. Built on the trillion-parameter Mixture-of-Experts (MoE) architecture introduced in Kimi K2, it activates 32 billion parameters per forward pass and supports 256 k-token context windows. The model is optimized for persistent step-by-step thought, dynamic tool invocation, and complex reasoning workflows that span hundreds of turns. It interleaves step-by-step reasoning with tool use, enabling autonomous research, coding, and writing that can persist for hundreds of sequential actions without drift.\n\nIt sets new open-source benchmarks on HLE, BrowseComp, SWE-Multilingual, and LiveCodeBench, while maintaining stable multi-agent behavior through 200–300 tool calls. 
Built on a large-scale MoE architecture with MuonClip optimization, it combines strong reasoning depth with high inference efficiency for demanding agentic and analytical tasks.", + "description": "Kimi K2 Thinking is Moonshot AI’s most advanced open reasoning model to date, extending the K2 series into agentic, long-horizon reasoning.", "context_length": 262144, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Other", - "instruct_type": null - }, - "pricing": { - "prompt": "0.00000055", - "completion": "0.00000225", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 262144, - "max_completion_tokens": 16384, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "include_reasoning", - "logit_bias", - "logprobs", - "max_tokens", - "min_p", - "presence_penalty", - "reasoning", - "repetition_penalty", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_k", - "top_logprobs", - "top_p" - ], - "default_parameters": { - "temperature": null, - "top_p": null, - "frequency_penalty": null - } + "hugging_face_id": "moonshotai/Kimi-K2-Thinking" }, { "id": "amazon/nova-premier-v1", - "canonical_slug": "amazon/nova-premier-v1", - "hugging_face_id": "", "name": "Amazon: Nova Premier 1.0", - "created": 1761950332, "description": "Amazon Nova Premier is the most capable of Amazon’s multimodal models for complex reasoning tasks and for use as the best teacher for distilling custom models.", "context_length": 1000000, - "architecture": { - "modality": "text+image-\u003Etext", - "input_modalities": ["text", "image"], - "output_modalities": ["text"], - "tokenizer": "Nova", - "instruct_type": null - }, - "pricing": { - "prompt": "0.0000025", - "completion": "0.0000125", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0", - "input_cache_read": "0.000000625" - }, - "top_provider": { - "context_length": 1000000, - "max_completion_tokens": 32000, - "is_moderated": true - }, - "per_request_limits": null, - "supported_parameters": [ - "max_tokens", - "stop", - "temperature", - "tools", - "top_k", - "top_p" - ], - "default_parameters": { - "temperature": null, - "top_p": null, - "frequency_penalty": null - } + "hugging_face_id": "" }, { "id": "perplexity/sonar-pro-search", - "canonical_slug": "perplexity/sonar-pro-search", - "hugging_face_id": "", "name": "Perplexity: Sonar Pro Search", - "created": 1761854366, - "description": "Exclusively available on the OpenRouter API, Sonar Pro's new Pro Search mode is Perplexity's most advanced agentic search system. It is designed for deeper reasoning and analysis. Pricing is based on tokens plus $18 per thousand requests. This model powers the Pro Search mode on the Perplexity platform.\n\nSonar Pro Search adds autonomous, multi-step reasoning to Sonar Pro. 
So, instead of just one query + synthesis, it plans and executes entire research workflows using tools.", + "description": "Exclusively available on the OpenRouter API, Sonar Pro's new Pro Search mode is Perplexity's most advanced agentic search system.", "context_length": 200000, - "architecture": { - "modality": "text+image-\u003Etext", - "input_modalities": ["text", "image"], - "output_modalities": ["text"], - "tokenizer": "Other", - "instruct_type": null - }, - "pricing": { - "prompt": "0.000003", - "completion": "0.000015", - "request": "0.018", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 200000, - "max_completion_tokens": 8000, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "include_reasoning", - "max_tokens", - "presence_penalty", - "reasoning", - "structured_outputs", - "temperature", - "top_k", - "top_p", - "web_search_options" - ], - "default_parameters": { - "temperature": null, - "top_p": null, - "frequency_penalty": null - } + "hugging_face_id": "" }, { "id": "mistralai/voxtral-small-24b-2507", - "canonical_slug": "mistralai/voxtral-small-24b-2507", - "hugging_face_id": "mistralai/Voxtral-Small-24B-2507", "name": "Mistral: Voxtral Small 24B 2507", - "created": 1761835144, - "description": "Voxtral Small is an enhancement of Mistral Small 3, incorporating state-of-the-art audio input capabilities while retaining best-in-class text performance. It excels at speech transcription, translation and audio understanding. Input audio is priced at $100 per million seconds.", + "description": "Voxtral Small is an enhancement of Mistral Small 3, incorporating state-of-the-art audio input capabilities while retaining best-in-class text performance.", "context_length": 32000, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text", "audio"], - "output_modalities": ["text"], - "tokenizer": "Mistral", - "instruct_type": null - }, - "pricing": { - "prompt": "0.0000001", - "completion": "0.0000003", - "request": "0", - "image": "0", - "audio": "0.0001", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 32000, - "max_completion_tokens": null, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "max_tokens", - "presence_penalty", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_p" - ], - "default_parameters": { - "temperature": 0.2, - "top_p": 0.95, - "frequency_penalty": null - } + "hugging_face_id": "mistralai/Voxtral-Small-24B-2507" }, { "id": "openai/gpt-oss-safeguard-20b", - "canonical_slug": "openai/gpt-oss-safeguard-20b", - "hugging_face_id": "openai/gpt-oss-safeguard-20b", "name": "OpenAI: gpt-oss-safeguard-20b", - "created": 1761752836, - "description": "gpt-oss-safeguard-20b is a safety reasoning model from OpenAI built upon gpt-oss-20b. 
This open-weight, 21B-parameter Mixture-of-Experts (MoE) model offers lower latency for safety tasks like content classification, LLM filtering, and trust & safety labeling.\n\nLearn more about this model in OpenAI's gpt-oss-safeguard [user guide](https://cookbook.openai.com/articles/gpt-oss-safeguard-guide).", + "description": "gpt-oss-safeguard-20b is a safety reasoning model from OpenAI built upon gpt-oss-20b.", "context_length": 131072, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "GPT", - "instruct_type": null - }, - "pricing": { - "prompt": "0.000000075", - "completion": "0.0000003", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0", - "input_cache_read": "0.000000037" - }, - "top_provider": { - "context_length": 131072, - "max_completion_tokens": 65536, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "include_reasoning", - "max_tokens", - "reasoning", - "response_format", - "seed", - "stop", - "temperature", - "tool_choice", - "tools", - "top_p" - ], - "default_parameters": { - "temperature": null, - "top_p": null, - "frequency_penalty": null - } + "hugging_face_id": "openai/gpt-oss-safeguard-20b" }, { "id": "nvidia/nemotron-nano-12b-v2-vl:free", - "canonical_slug": "nvidia/nemotron-nano-12b-v2-vl", - "hugging_face_id": "nvidia/NVIDIA-Nemotron-Nano-12B-v2-VL-BF16", "name": "NVIDIA: Nemotron Nano 12B 2 VL (free)", - "created": 1761675565, - "description": "NVIDIA Nemotron Nano 2 VL is a 12-billion-parameter open multimodal reasoning model designed for video understanding and document intelligence. It introduces a hybrid Transformer-Mamba architecture, combining transformer-level accuracy with Mamba’s memory-efficient sequence modeling for significantly higher throughput and lower latency.\n\nThe model supports inputs of text and multi-image documents, producing natural-language outputs. It is trained on high-quality NVIDIA-curated synthetic datasets optimized for optical-character recognition, chart reasoning, and multimodal comprehension.\n\nNemotron Nano 2 VL achieves leading results on OCRBench v2 and scores ≈ 74 average across MMMU, MathVista, AI2D, OCRBench, OCR-Reasoning, ChartQA, DocVQA, and Video-MME—surpassing prior open VL baselines. 
With Efficient Video Sampling (EVS), it handles long-form videos while reducing inference cost.\n\nOpen-weights, training data, and fine-tuning recipes are released under a permissive NVIDIA open license, with deployment supported across NeMo, NIM, and major inference runtimes.", + "description": "NVIDIA Nemotron Nano 2 VL is a 12-billion-parameter open multimodal reasoning model designed for video understanding and document intelligence.", "context_length": 128000, - "architecture": { - "modality": "text+image-\u003Etext", - "input_modalities": ["image", "text", "video"], - "output_modalities": ["text"], - "tokenizer": "Other", - "instruct_type": null - }, - "pricing": { - "prompt": "0", - "completion": "0", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 128000, - "max_completion_tokens": 128000, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "include_reasoning", - "reasoning", - "tool_choice", - "tools" - ], - "default_parameters": { - "temperature": null, - "top_p": null, - "frequency_penalty": null - } + "hugging_face_id": "nvidia/NVIDIA-Nemotron-Nano-12B-v2-VL-BF16" }, { "id": "nvidia/nemotron-nano-12b-v2-vl", - "canonical_slug": "nvidia/nemotron-nano-12b-v2-vl", - "hugging_face_id": "nvidia/NVIDIA-Nemotron-Nano-12B-v2-VL-BF16", "name": "NVIDIA: Nemotron Nano 12B 2 VL", - "created": 1761675565, - "description": "NVIDIA Nemotron Nano 2 VL is a 12-billion-parameter open multimodal reasoning model designed for video understanding and document intelligence. It introduces a hybrid Transformer-Mamba architecture, combining transformer-level accuracy with Mamba’s memory-efficient sequence modeling for significantly higher throughput and lower latency.\n\nThe model supports inputs of text and multi-image documents, producing natural-language outputs. It is trained on high-quality NVIDIA-curated synthetic datasets optimized for optical-character recognition, chart reasoning, and multimodal comprehension.\n\nNemotron Nano 2 VL achieves leading results on OCRBench v2 and scores ≈ 74 average across MMMU, MathVista, AI2D, OCRBench, OCR-Reasoning, ChartQA, DocVQA, and Video-MME—surpassing prior open VL baselines. 
With Efficient Video Sampling (EVS), it handles long-form videos while reducing inference cost.\n\nOpen-weights, training data, and fine-tuning recipes are released under a permissive NVIDIA open license, with deployment supported across NeMo, NIM, and major inference runtimes.", + "description": "NVIDIA Nemotron Nano 2 VL is a 12-billion-parameter open multimodal reasoning model designed for video understanding and document intelligence.", "context_length": 131072, - "architecture": { - "modality": "text+image-\u003Etext", - "input_modalities": ["image", "text", "video"], - "output_modalities": ["text"], - "tokenizer": "Other", - "instruct_type": null - }, - "pricing": { - "prompt": "0.0000002", - "completion": "0.0000006", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 131072, - "max_completion_tokens": null, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "include_reasoning", - "max_tokens", - "min_p", - "presence_penalty", - "reasoning", - "repetition_penalty", - "response_format", - "seed", - "stop", - "temperature", - "top_k", - "top_p" - ], - "default_parameters": { - "temperature": null, - "top_p": null, - "frequency_penalty": null - } + "hugging_face_id": "nvidia/NVIDIA-Nemotron-Nano-12B-v2-VL-BF16" }, { "id": "minimax/minimax-m2", - "canonical_slug": "minimax/minimax-m2", - "hugging_face_id": "MiniMaxAI/MiniMax-M2", "name": "MiniMax: MiniMax M2", - "created": 1761252093, - "description": "MiniMax-M2 is a compact, high-efficiency large language model optimized for end-to-end coding and agentic workflows. With 10 billion activated parameters (230 billion total), it delivers near-frontier intelligence across general reasoning, tool use, and multi-step task execution while maintaining low latency and deployment efficiency.\n\nThe model excels in code generation, multi-file editing, compile-run-fix loops, and test-validated repair, showing strong results on SWE-Bench Verified, Multi-SWE-Bench, and Terminal-Bench. It also performs competitively in agentic evaluations such as BrowseComp and GAIA, effectively handling long-horizon planning, retrieval, and recovery from execution errors.\n\nBenchmarked by [Artificial Analysis](https://artificialanalysis.ai/models/minimax-m2), MiniMax-M2 ranks among the top open-source models for composite intelligence, spanning mathematics, science, and instruction-following. Its small activation footprint enables fast inference, high concurrency, and improved unit economics, making it well-suited for large-scale agents, developer assistants, and reasoning-driven applications that require responsiveness and cost efficiency.\n\nTo avoid degrading this model's performance, MiniMax highly recommends preserving reasoning between turns. 
Learn more about using reasoning_details to pass back reasoning in our [docs](https://openrouter.ai/docs/use-cases/reasoning-tokens#preserving-reasoning-blocks).", + "description": "MiniMax-M2 is a compact, high-efficiency large language model optimized for end-to-end coding and agentic workflows.", "context_length": 204800, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Other", - "instruct_type": null - }, - "pricing": { - "prompt": "0.000000255", - "completion": "0.00000102", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 204800, - "max_completion_tokens": 131072, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "include_reasoning", - "logit_bias", - "logprobs", - "max_tokens", - "min_p", - "presence_penalty", - "reasoning", - "repetition_penalty", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_k", - "top_logprobs", - "top_p" - ], - "default_parameters": { - "temperature": 1, - "top_p": 0.95, - "frequency_penalty": null - } + "hugging_face_id": "MiniMaxAI/MiniMax-M2" }, { "id": "liquid/lfm2-8b-a1b", - "canonical_slug": "liquid/lfm2-8b-a1b", - "hugging_face_id": "LiquidAI/LFM2-8B-A1B", "name": "LiquidAI/LFM2-8B-A1B", - "created": 1760970984, - "description": "Model created via inbox interface", + "description": "Model created via inbox interface.", "context_length": 32768, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Other", - "instruct_type": null - }, - "pricing": { - "prompt": "0.00000005", - "completion": "0.0000001", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 32768, - "max_completion_tokens": null, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "max_tokens", - "min_p", - "presence_penalty", - "repetition_penalty", - "seed", - "stop", - "temperature", - "top_k", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "LiquidAI/LFM2-8B-A1B" }, { "id": "liquid/lfm-2.2-6b", - "canonical_slug": "liquid/lfm-2.2-6b", - "hugging_face_id": "LiquidAI/LFM2-2.6B", "name": "LiquidAI/LFM2-2.6B", - "created": 1760970889, - "description": "LFM2 is a new generation of hybrid models developed by Liquid AI, specifically designed for edge AI and on-device deployment. 
It sets a new standard in terms of quality, speed, and memory efficiency.", + "description": "LFM2 is a new generation of hybrid models developed by Liquid AI, specifically designed for edge AI and on-device deployment.", "context_length": 32768, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Other", - "instruct_type": null - }, - "pricing": { - "prompt": "0.00000005", - "completion": "0.0000001", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 32768, - "max_completion_tokens": null, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "max_tokens", - "min_p", - "presence_penalty", - "repetition_penalty", - "seed", - "stop", - "temperature", - "top_k", - "top_p" - ], - "default_parameters": { - "temperature": null, - "top_p": null, - "frequency_penalty": null - } + "hugging_face_id": "LiquidAI/LFM2-2.6B" }, { "id": "ibm-granite/granite-4.0-h-micro", - "canonical_slug": "ibm-granite/granite-4.0-h-micro", - "hugging_face_id": "ibm-granite/granite-4.0-h-micro", "name": "IBM: Granite 4.0 Micro", - "created": 1760927695, - "description": "Granite-4.0-H-Micro is a 3B parameter from the Granite 4 family of models. These models are the latest in a series of models released by IBM. They are fine-tuned for long context tool calling. ", + "description": "Granite-4.0-H-Micro is a 3B-parameter model from the Granite 4 family of models.", "context_length": 131000, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Other", - "instruct_type": null - }, - "pricing": { - "prompt": "0.000000017", - "completion": "0.00000011", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 131000, - "max_completion_tokens": null, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "max_tokens", - "presence_penalty", - "repetition_penalty", - "seed", - "temperature", - "top_k", - "top_p" - ], - "default_parameters": { - "temperature": null, - "top_p": null, - "frequency_penalty": null - } + "hugging_face_id": "ibm-granite/granite-4.0-h-micro" }, { "id": "deepcogito/cogito-v2-preview-llama-405b", - "canonical_slug": "deepcogito/cogito-v2-preview-llama-405b", - "hugging_face_id": "deepcogito/cogito-v2-preview-llama-405B", "name": "Deep Cogito: Cogito V2 Preview Llama 405B", - "created": 1760709933, - "description": "Cogito v2 405B is a dense hybrid reasoning model that combines direct answering capabilities with advanced self-reflection. It represents a significant step toward frontier intelligence with dense architecture delivering performance competitive with leading closed models.
This advanced reasoning system combines policy improvement with massive scale for exceptional capabilities.\n", + "description": "Cogito v2 405B is a dense hybrid reasoning model that combines direct answering capabilities with advanced self-reflection.", "context_length": 32768, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Llama3", - "instruct_type": null - }, - "pricing": { - "prompt": "0.0000035", - "completion": "0.0000035", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 32768, - "max_completion_tokens": null, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "include_reasoning", - "logit_bias", - "max_tokens", - "min_p", - "presence_penalty", - "reasoning", - "repetition_penalty", - "response_format", - "stop", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_k", - "top_p" - ], - "default_parameters": { - "temperature": null, - "top_p": null, - "frequency_penalty": null - } + "hugging_face_id": "deepcogito/cogito-v2-preview-llama-405B" }, { "id": "openai/gpt-5-image-mini", - "canonical_slug": "openai/gpt-5-image-mini", - "hugging_face_id": "", "name": "OpenAI: GPT-5 Image Mini", - "created": 1760624583, - "description": "GPT-5 Image Mini combines OpenAI's advanced language capabilities, powered by [GPT-5 Mini](https://openrouter.ai/openai/gpt-5-mini), with GPT Image 1 Mini for efficient image generation. This natively multimodal model features superior instruction following, text rendering, and detailed image editing with reduced latency and cost. It excels at high-quality visual creation while maintaining strong text understanding, making it ideal for applications that require both efficient image generation and text processing at scale.", + "description": "GPT-5 Image Mini combines OpenAI's advanced language capabilities, powered by [GPT-5 Mini](https://openrouter.ai/openai/gpt-5-mini), with GPT Image 1 Mini for efficient image generation.", "context_length": 400000, - "architecture": { - "modality": "text+image-\u003Etext+image", - "input_modalities": ["file", "image", "text"], - "output_modalities": ["image", "text"], - "tokenizer": "GPT", - "instruct_type": null - }, - "pricing": { - "prompt": "0.0000025", - "completion": "0.000002", - "request": "0", - "image": "0.0000025", - "web_search": "0.01", - "internal_reasoning": "0", - "input_cache_read": "0.00000025" - }, - "top_provider": { - "context_length": 400000, - "max_completion_tokens": 128000, - "is_moderated": true - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "include_reasoning", - "logit_bias", - "logprobs", - "max_tokens", - "presence_penalty", - "reasoning", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_logprobs", - "top_p" - ], - "default_parameters": { - "temperature": null, - "top_p": null, - "frequency_penalty": null - } + "hugging_face_id": "" }, { "id": "anthropic/claude-haiku-4.5", - "canonical_slug": "anthropic/claude-4.5-haiku-20251001", - "hugging_face_id": "", "name": "Anthropic: Claude Haiku 4.5", - "created": 1760547638, - "description": "Claude Haiku 4.5 is Anthropic’s fastest and most efficient model, delivering near-frontier intelligence at a fraction of the cost and latency of larger Claude models. 
Matching Claude Sonnet 4’s performance across reasoning, coding, and computer-use tasks, Haiku 4.5 brings frontier-level capability to real-time and high-volume applications.\n\nIt introduces extended thinking to the Haiku line; enabling controllable reasoning depth, summarized or interleaved thought output, and tool-assisted workflows with full support for coding, bash, web search, and computer-use tools. Scoring \u003E73% on SWE-bench Verified, Haiku 4.5 ranks among the world’s best coding models while maintaining exceptional responsiveness for sub-agents, parallelized execution, and scaled deployment.", + "description": "Claude Haiku 4.5 is Anthropic’s fastest and most efficient model, delivering near-frontier intelligence at a fraction of the cost and latency of larger Claude models.", "context_length": 200000, - "architecture": { - "modality": "text+image-\u003Etext", - "input_modalities": ["image", "text"], - "output_modalities": ["text"], - "tokenizer": "Claude", - "instruct_type": null - }, - "pricing": { - "prompt": "0.000001", - "completion": "0.000005", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0", - "input_cache_read": "0.0000001", - "input_cache_write": "0.00000125" - }, - "top_provider": { - "context_length": 200000, - "max_completion_tokens": 64000, - "is_moderated": true - }, - "per_request_limits": null, - "supported_parameters": [ - "include_reasoning", - "max_tokens", - "reasoning", - "stop", - "temperature", - "tool_choice", - "tools", - "top_k", - "top_p" - ], - "default_parameters": { - "temperature": null, - "top_p": null, - "frequency_penalty": null - } + "hugging_face_id": "" }, { "id": "qwen/qwen3-vl-8b-thinking", - "canonical_slug": "qwen/qwen3-vl-8b-thinking", - "hugging_face_id": "Qwen/Qwen3-VL-8B-Thinking", "name": "Qwen: Qwen3 VL 8B Thinking", - "created": 1760463746, - "description": "Qwen3-VL-8B-Thinking is the reasoning-optimized variant of the Qwen3-VL-8B multimodal model, designed for advanced visual and textual reasoning across complex scenes, documents, and temporal sequences. It integrates enhanced multimodal alignment and long-context processing (native 256K, expandable to 1M tokens) for tasks such as scientific visual analysis, causal inference, and mathematical reasoning over image or video inputs.\n\nCompared to the Instruct edition, the Thinking version introduces deeper visual-language fusion and deliberate reasoning pathways that improve performance on long-chain logic tasks, STEM problem-solving, and multi-step video understanding. 
It achieves stronger temporal grounding via Interleaved-MRoPE and timestamp-aware embeddings, while maintaining robust OCR, multilingual comprehension, and text generation on par with large text-only LLMs.", + "description": "Qwen3-VL-8B-Thinking is the reasoning-optimized variant of the Qwen3-VL-8B multimodal model, designed for advanced visual and textual reasoning across complex scenes, documents, and temporal sequences.", "context_length": 256000, - "architecture": { - "modality": "text+image-\u003Etext", - "input_modalities": ["image", "text"], - "output_modalities": ["text"], - "tokenizer": "Qwen3", - "instruct_type": null - }, - "pricing": { - "prompt": "0.00000018", - "completion": "0.0000021", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 256000, - "max_completion_tokens": 32768, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "include_reasoning", - "max_tokens", - "presence_penalty", - "reasoning", - "response_format", - "seed", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_p" - ], - "default_parameters": { - "temperature": 1, - "top_p": 0.95 - } + "hugging_face_id": "Qwen/Qwen3-VL-8B-Thinking" }, { "id": "qwen/qwen3-vl-8b-instruct", - "canonical_slug": "qwen/qwen3-vl-8b-instruct", - "hugging_face_id": "Qwen/Qwen3-VL-8B-Instruct", "name": "Qwen: Qwen3 VL 8B Instruct", - "created": 1760463308, - "description": "Qwen3-VL-8B-Instruct is a multimodal vision-language model from the Qwen3-VL series, built for high-fidelity understanding and reasoning across text, images, and video. It features improved multimodal fusion with Interleaved-MRoPE for long-horizon temporal reasoning, DeepStack for fine-grained visual-text alignment, and text-timestamp alignment for precise event localization.\n\nThe model supports a native 256K-token context window, extensible to 1M tokens, and handles both static and dynamic media inputs for tasks like document parsing, visual question answering, spatial reasoning, and GUI control. 
It achieves text understanding comparable to leading LLMs while expanding OCR coverage to 32 languages and enhancing robustness under varied visual conditions.", + "description": "Qwen3-VL-8B-Instruct is a multimodal vision-language model from the Qwen3-VL series, built for high-fidelity understanding and reasoning across text, images, and video.", "context_length": 131072, - "architecture": { - "modality": "text+image-\u003Etext", - "input_modalities": ["image", "text"], - "output_modalities": ["text"], - "tokenizer": "Qwen3", - "instruct_type": null - }, - "pricing": { - "prompt": "0.00000008", - "completion": "0.0000005", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0", - "input_cache_read": "0" - }, - "top_provider": { - "context_length": 131072, - "max_completion_tokens": 32768, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "logit_bias", - "max_tokens", - "min_p", - "presence_penalty", - "repetition_penalty", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_k", - "top_p" - ], - "default_parameters": { - "temperature": 0.7, - "top_p": 0.8, - "frequency_penalty": null - } + "hugging_face_id": "Qwen/Qwen3-VL-8B-Instruct" }, { "id": "openai/gpt-5-image", - "canonical_slug": "openai/gpt-5-image", - "hugging_face_id": "", "name": "OpenAI: GPT-5 Image", - "created": 1760447986, - "description": "[GPT-5](https://openrouter.ai/openai/gpt-5) Image combines OpenAI's most advanced language model with state-of-the-art image generation capabilities. It offers major improvements in reasoning, code quality, and user experience while incorporating GPT Image 1's superior instruction following, text rendering, and detailed image editing.", + "description": "[GPT-5](https://openrouter.ai/openai/gpt-5) Image combines OpenAI's most advanced language model with state-of-the-art image generation capabilities.", "context_length": 400000, - "architecture": { - "modality": "text+image-\u003Etext+image", - "input_modalities": ["image", "text", "file"], - "output_modalities": ["image", "text"], - "tokenizer": "GPT", - "instruct_type": null - }, - "pricing": { - "prompt": "0.00001", - "completion": "0.00001", - "request": "0", - "image": "0.00001", - "web_search": "0.01", - "internal_reasoning": "0", - "input_cache_read": "0.00000125" - }, - "top_provider": { - "context_length": 400000, - "max_completion_tokens": 128000, - "is_moderated": true - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "include_reasoning", - "logit_bias", - "logprobs", - "max_tokens", - "presence_penalty", - "reasoning", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_logprobs", - "top_p" - ], - "default_parameters": { - "temperature": null, - "top_p": null, - "frequency_penalty": null - } + "hugging_face_id": "" }, { "id": "inclusionai/ring-1t", - "canonical_slug": "inclusionai/ring-1t", - "hugging_face_id": "inclusionAI/Ring-1T", "name": "inclusionAI: Ring 1T", - "created": 1760384099, - "description": "Ring-1T has undergone continued scaling with large-scale verifiable reward reinforcement learning (RLVR) training, further unlocking the natural language reasoning capabilities of the trillion-parameter foundation model. 
Through RLHF training, the model's general abilities have also been refined, making this release of Ring-1T more balanced in performance across various tasks.\n\nRing-1T adopts the Ling 2.0 architecture and is trained on the Ling-1T-base foundation model, which contains 1 trillion total parameters with 50 billion activated parameters, supporting a context window of up to 128K tokens.", + "description": "Ring-1T has undergone continued scaling with large-scale verifiable reward reinforcement learning (RLVR) training, further unlocking the natural language reasoning capabilities of the trillion-parameter foundation model.", "context_length": 131072, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Other", - "instruct_type": null - }, - "pricing": { - "prompt": "0.00000057", - "completion": "0.00000228", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 131072, - "max_completion_tokens": 131072, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "include_reasoning", - "reasoning", - "response_format", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_k", - "top_p" - ], - "default_parameters": { - "temperature": null, - "top_p": null, - "frequency_penalty": null - } + "hugging_face_id": "inclusionAI/Ring-1T" }, { "id": "inclusionai/ling-1t", - "canonical_slug": "inclusionai/ling-1t", - "hugging_face_id": "inclusionAI/Ling-1T", "name": "inclusionAI: Ling-1T", - "created": 1760316076, - "description": "Ling-1T is a trillion-parameter open-weight large language model developed by inclusionAI and released under the MIT license. It represents the first flagship non-thinking model in the Ling 2.0 series, built around a sparse-activation architecture with roughly 50 billion active parameters per token. The model supports up to 128 K tokens of context and emphasizes efficient reasoning through an “Evolutionary Chain-of-Thought (Evo-CoT)” training strategy.\n\nPre-trained on more than 20 trillion reasoning-dense tokens, Ling-1T achieves strong results across code generation, mathematics, and logical reasoning benchmarks while maintaining high inference efficiency. It employs FP8 mixed-precision training, MoE routing with QK normalization, and MTP layers for compositional reasoning stability.
The model also introduces LPO (Linguistics-unit Policy Optimization) for post-training alignment, enhancing sentence-level semantic control.\n\nLing-1T can perform complex text generation, multilingual reasoning, and front-end code synthesis with a focus on both functionality and aesthetics.", + "description": "Ling-1T is a trillion-parameter open-weight large language model developed by inclusionAI and released under the MIT license.", "context_length": 131072, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Other", - "instruct_type": null - }, - "pricing": { - "prompt": "0.00000057", - "completion": "0.00000228", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 131072, - "max_completion_tokens": 131072, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "response_format", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_k", - "top_p" - ], - "default_parameters": { - "temperature": null, - "top_p": null, - "frequency_penalty": null - } + "hugging_face_id": "inclusionAI/Ling-1T" }, { "id": "openai/o3-deep-research", - "canonical_slug": "openai/o3-deep-research-2025-06-26", - "hugging_face_id": "", "name": "OpenAI: o3 Deep Research", - "created": 1760129661, - "description": "o3-deep-research is OpenAI's advanced model for deep research, designed to tackle complex, multi-step research tasks.\n\nNote: This model always uses the 'web_search' tool which adds additional cost.", + "description": "o3-deep-research is OpenAI's advanced model for deep research, designed to tackle complex, multi-step research tasks.", "context_length": 200000, - "architecture": { - "modality": "text+image-\u003Etext", - "input_modalities": ["image", "text", "file"], - "output_modalities": ["text"], - "tokenizer": "GPT", - "instruct_type": null - }, - "pricing": { - "prompt": "0.00001", - "completion": "0.00004", - "request": "0", - "image": "0.00765", - "web_search": "0.01", - "internal_reasoning": "0", - "input_cache_read": "0.0000025" - }, - "top_provider": { - "context_length": 200000, - "max_completion_tokens": 100000, - "is_moderated": true - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "include_reasoning", - "logit_bias", - "logprobs", - "max_tokens", - "presence_penalty", - "reasoning", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_logprobs", - "top_p" - ], - "default_parameters": { - "temperature": null, - "top_p": null, - "frequency_penalty": null - } + "hugging_face_id": "" }, { "id": "openai/o4-mini-deep-research", - "canonical_slug": "openai/o4-mini-deep-research-2025-06-26", - "hugging_face_id": "", "name": "OpenAI: o4 Mini Deep Research", - "created": 1760129642, - "description": "o4-mini-deep-research is OpenAI's faster, more affordable deep research model—ideal for tackling complex, multi-step research tasks.\n\nNote: This model always uses the 'web_search' tool which adds additional cost.", + "description": "o4-mini-deep-research is OpenAI's faster, more affordable deep research model—ideal for tackling complex, multi-step research tasks.", "context_length": 200000, - "architecture": { - "modality": "text+image-\u003Etext", - "input_modalities": ["file", "image", "text"], - "output_modalities": ["text"], - "tokenizer": "GPT", - 
"instruct_type": null - }, - "pricing": { - "prompt": "0.000002", - "completion": "0.000008", - "request": "0", - "image": "0.00153", - "web_search": "0.01", - "internal_reasoning": "0", - "input_cache_read": "0.0000005" - }, - "top_provider": { - "context_length": 200000, - "max_completion_tokens": 100000, - "is_moderated": true - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "include_reasoning", - "logit_bias", - "logprobs", - "max_tokens", - "presence_penalty", - "reasoning", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_logprobs", - "top_p" - ], - "default_parameters": { - "temperature": null, - "top_p": null, - "frequency_penalty": null - } + "hugging_face_id": "" }, { "id": "nvidia/llama-3.3-nemotron-super-49b-v1.5", - "canonical_slug": "nvidia/llama-3.3-nemotron-super-49b-v1.5", - "hugging_face_id": "nvidia/Llama-3_3-Nemotron-Super-49B-v1_5", "name": "NVIDIA: Llama 3.3 Nemotron Super 49B V1.5", - "created": 1760101395, - "description": "Llama-3.3-Nemotron-Super-49B-v1.5 is a 49B-parameter, English-centric reasoning/chat model derived from Meta’s Llama-3.3-70B-Instruct with a 128K context. It’s post-trained for agentic workflows (RAG, tool calling) via SFT across math, code, science, and multi-turn chat, followed by multiple RL stages; Reward-aware Preference Optimization (RPO) for alignment, RL with Verifiable Rewards (RLVR) for step-wise reasoning, and iterative DPO to refine tool-use behavior. A distillation-driven Neural Architecture Search (“Puzzle”) replaces some attention blocks and varies FFN widths to shrink memory footprint and improve throughput, enabling single-GPU (H100/H200) deployment while preserving instruction following and CoT quality.\n\nIn internal evaluations (NeMo-Skills, up to 16 runs, temp = 0.6, top_p = 0.95), the model reports strong reasoning/coding results, e.g., MATH500 pass@1 = 97.4, AIME-2024 = 87.5, AIME-2025 = 82.71, GPQA = 71.97, LiveCodeBench (24.10–25.02) = 73.58, and MMLU-Pro (CoT) = 79.53. The model targets practical inference efficiency (high tokens/s, reduced VRAM) with Transformers/vLLM support and explicit “reasoning on/off” modes (chat-first defaults, greedy recommended when disabled). 
Suitable for building agents, assistants, and long-context retrieval systems where balanced accuracy-to-cost and reliable tool use matter.\n", + "description": "Llama-3.3-Nemotron-Super-49B-v1.5 is a 49B-parameter, English-centric reasoning/chat model derived from Meta’s Llama-3.3-70B-Instruct with a 128K context.", "context_length": 131072, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Llama3", - "instruct_type": null - }, - "pricing": { - "prompt": "0.0000001", - "completion": "0.0000004", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 131072, - "max_completion_tokens": null, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "include_reasoning", - "max_tokens", - "min_p", - "presence_penalty", - "reasoning", - "repetition_penalty", - "response_format", - "seed", - "stop", - "temperature", - "tool_choice", - "tools", - "top_k", - "top_p" - ], - "default_parameters": null + "hugging_face_id": "nvidia/Llama-3_3-Nemotron-Super-49B-v1_5" }, { "id": "baidu/ernie-4.5-21b-a3b-thinking", - "canonical_slug": "baidu/ernie-4.5-21b-a3b-thinking", - "hugging_face_id": "baidu/ERNIE-4.5-21B-A3B-Thinking", "name": "Baidu: ERNIE 4.5 21B A3B Thinking", - "created": 1760048887, - "description": "ERNIE-4.5-21B-A3B-Thinking is Baidu's upgraded lightweight MoE model, refined to boost reasoning depth and quality for top-tier performance in logical puzzles, math, science, coding, text generation, and expert-level academic benchmarks.", + "description": "ERNIE-4.5-21B-A3B-Thinking is Baidu's upgraded lightweight MoE model, refined to boost reasoning depth and quality for top-tier performance in logical puzzles, math, science, coding, text generation, and expert-level academic benchmarks.", "context_length": 131072, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Other", - "instruct_type": null - }, - "pricing": { - "prompt": "0.00000007", - "completion": "0.00000028", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 131072, - "max_completion_tokens": 65536, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "include_reasoning", - "max_tokens", - "presence_penalty", - "reasoning", - "repetition_penalty", - "seed", - "stop", - "temperature", - "top_k", - "top_p" - ], - "default_parameters": { - "temperature": 0.6, - "top_p": 0.95, - "frequency_penalty": null - } + "hugging_face_id": "baidu/ERNIE-4.5-21B-A3B-Thinking" }, { "id": "google/gemini-2.5-flash-image", - "canonical_slug": "google/gemini-2.5-flash-image", - "hugging_face_id": "", "name": "Google: Gemini 2.5 Flash Image (Nano Banana)", - "created": 1759870431, - "description": "Gemini 2.5 Flash Image, a.k.a. \"Nano Banana,\" is now generally available. It is a state of the art image generation model with contextual understanding. It is capable of image generation, edits, and multi-turn conversations.
Aspect ratios can be controlled with the [image_config API Parameter](https://openrouter.ai/docs/features/multimodal/image-generation#image-aspect-ratio-configuration)", + "description": "Gemini 2.5 Flash Image, a.k.a. \"Nano Banana,\" is now generally available. It is a state of the art image generation model with contextual understanding.", "context_length": 32768, - "architecture": { - "modality": "text+image-\u003Etext+image", - "input_modalities": ["image", "text"], - "output_modalities": ["image", "text"], - "tokenizer": "Gemini", - "instruct_type": null - }, - "pricing": { - "prompt": "0.0000003", - "completion": "0.0000025", - "request": "0", - "image": "0.001238", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 32768, - "max_completion_tokens": 32768, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "max_tokens", - "response_format", - "seed", - "structured_outputs", - "temperature", - "top_p" - ], - "default_parameters": { - "temperature": null, - "top_p": null, - "frequency_penalty": null - } + "hugging_face_id": "" }, { "id": "qwen/qwen3-vl-30b-a3b-thinking", - "canonical_slug": "qwen/qwen3-vl-30b-a3b-thinking", - "hugging_face_id": "Qwen/Qwen3-VL-30B-A3B-Thinking", "name": "Qwen: Qwen3 VL 30B A3B Thinking", - "created": 1759794479, - "description": "Qwen3-VL-30B-A3B-Thinking is a multimodal model that unifies strong text generation with visual understanding for images and videos. Its Thinking variant enhances reasoning in STEM, math, and complex tasks. It excels in perception of real-world/synthetic categories, 2D/3D spatial grounding, and long-form visual comprehension, achieving competitive multimodal benchmark results. For agentic use, it handles multi-image multi-turn instructions, video timeline alignments, GUI automation, and visual coding from sketches to debugged UI. Text performance matches flagship Qwen3 models, suiting document AI, OCR, UI assistance, spatial tasks, and agent research.", + "description": "Qwen3-VL-30B-A3B-Thinking is a multimodal model that unifies strong text generation with visual understanding for images and videos.", "context_length": 131072, - "architecture": { - "modality": "text+image-\u003Etext", - "input_modalities": ["text", "image"], - "output_modalities": ["text"], - "tokenizer": "Qwen3", - "instruct_type": null - }, - "pricing": { - "prompt": "0.0000002", - "completion": "0.000001", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0", - "input_cache_read": "0" - }, - "top_provider": { - "context_length": 131072, - "max_completion_tokens": 32768, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "include_reasoning", - "max_tokens", - "presence_penalty", - "reasoning", - "repetition_penalty", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_k", - "top_p" - ], - "default_parameters": { - "temperature": 0.8, - "top_p": 0.95 - } + "hugging_face_id": "Qwen/Qwen3-VL-30B-A3B-Thinking" }, { "id": "qwen/qwen3-vl-30b-a3b-instruct", - "canonical_slug": "qwen/qwen3-vl-30b-a3b-instruct", - "hugging_face_id": "Qwen/Qwen3-VL-30B-A3B-Instruct", "name": "Qwen: Qwen3 VL 30B A3B Instruct", - "created": 1759794476, - "description": "Qwen3-VL-30B-A3B-Instruct is a multimodal model that unifies strong text generation with visual understanding for images and videos. Its Instruct variant optimizes instruction-following for general multimodal tasks.
It excels in perception of real-world/synthetic categories, 2D/3D spatial grounding, and long-form visual comprehension, achieving competitive multimodal benchmark results. For agentic use, it handles multi-image multi-turn instructions, video timeline alignments, GUI automation, and visual coding from sketches to debugged UI. Text performance matches flagship Qwen3 models, suiting document AI, OCR, UI assistance, spatial tasks, and agent research.", + "description": "Qwen3-VL-30B-A3B-Instruct is a multimodal model that unifies strong text generation with visual understanding for images and videos.", "context_length": 262144, - "architecture": { - "modality": "text+image-\u003Etext", - "input_modalities": ["text", "image"], - "output_modalities": ["text"], - "tokenizer": "Qwen3", - "instruct_type": null - }, - "pricing": { - "prompt": "0.00000015", - "completion": "0.0000006", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 262144, - "max_completion_tokens": null, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "logit_bias", - "logprobs", - "max_tokens", - "min_p", - "presence_penalty", - "repetition_penalty", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_k", - "top_logprobs", - "top_p" - ], - "default_parameters": { - "temperature": 0.7, - "top_p": 0.8, - "frequency_penalty": null - } + "hugging_face_id": "Qwen/Qwen3-VL-30B-A3B-Instruct" }, { "id": "openai/gpt-5-pro", - "canonical_slug": "openai/gpt-5-pro-2025-10-06", - "hugging_face_id": "", "name": "OpenAI: GPT-5 Pro", - "created": 1759776663, - "description": "GPT-5 Pro is OpenAI’s most advanced model, offering major improvements in reasoning, code quality, and user experience. It is optimized for complex tasks that require step-by-step reasoning, instruction following, and accuracy in high-stakes use cases. 
It supports test-time routing features and advanced prompt understanding, including user-specified intent like \"think hard about this.\" Improvements include reductions in hallucination, sycophancy, and better performance in coding, writing, and health-related tasks.", + "description": "GPT-5 Pro is OpenAI’s most advanced model, offering major improvements in reasoning, code quality, and user experience.", "context_length": 400000, - "architecture": { - "modality": "text+image-\u003Etext", - "input_modalities": ["image", "text", "file"], - "output_modalities": ["text"], - "tokenizer": "GPT", - "instruct_type": null - }, - "pricing": { - "prompt": "0.000015", - "completion": "0.00012", - "request": "0", - "image": "0", - "web_search": "0.01", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 400000, - "max_completion_tokens": 128000, - "is_moderated": true - }, - "per_request_limits": null, - "supported_parameters": [ - "include_reasoning", - "max_tokens", - "reasoning", - "response_format", - "seed", - "structured_outputs", - "tool_choice", - "tools" - ], - "default_parameters": { - "temperature": null, - "top_p": null, - "frequency_penalty": null - } + "hugging_face_id": "" }, { "id": "z-ai/glm-4.6", - "canonical_slug": "z-ai/glm-4.6", - "hugging_face_id": "", "name": "Z.AI: GLM 4.6", - "created": 1759235576, - "description": "Compared with GLM-4.5, this generation brings several key improvements:\n\nLonger context window: The context window has been expanded from 128K to 200K tokens, enabling the model to handle more complex agentic tasks.\nSuperior coding performance: The model achieves higher scores on code benchmarks and demonstrates better real-world performance in applications such as Claude Code、Cline、Roo Code and Kilo Code, including improvements in generating visually polished front-end pages.\nAdvanced reasoning: GLM-4.6 shows a clear improvement in reasoning performance and supports tool use during inference, leading to stronger overall capability.\nMore capable agents: GLM-4.6 exhibits stronger performance in tool using and search-based agents, and integrates more effectively within agent frameworks.\nRefined writing: Better aligns with human preferences in style and readability, and performs more naturally in role-playing scenarios.", + "description": "Compared with GLM-4.5, this generation brings several key improvements:\n\nLonger context window: The context window has been expanded from 128K to 200K tokens, enabling the model to handle more complex agentic tasks.", "context_length": 202752, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Other", - "instruct_type": null - }, - "pricing": { - "prompt": "0.0000004", - "completion": "0.00000175", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 202752, - "max_completion_tokens": 202752, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "include_reasoning", - "logit_bias", - "logprobs", - "max_tokens", - "min_p", - "presence_penalty", - "reasoning", - "repetition_penalty", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_a", - "top_k", - "top_logprobs", - "top_p" - ], - "default_parameters": { - "temperature": 0.6, - "top_p": null, - "frequency_penalty": null - } + "hugging_face_id": "" }, { "id": "z-ai/glm-4.6:exacto", -
"canonical_slug": "z-ai/glm-4.6", - "hugging_face_id": "", "name": "Z.AI: GLM 4.6 (exacto)", - "created": 1759235576, - "description": "Compared with GLM-4.5, this generation brings several key improvements:\n\nLonger context window: The context window has been expanded from 128K to 200K tokens, enabling the model to handle more complex agentic tasks.\nSuperior coding performance: The model achieves higher scores on code benchmarks and demonstrates better real-world performance in applications such as Claude Code、Cline、Roo Code and Kilo Code, including improvements in generating visually polished front-end pages.\nAdvanced reasoning: GLM-4.6 shows a clear improvement in reasoning performance and supports tool use during inference, leading to stronger overall capability.\nMore capable agents: GLM-4.6 exhibits stronger performance in tool using and search-based agents, and integrates more effectively within agent frameworks.\nRefined writing: Better aligns with human preferences in style and readability, and performs more naturally in role-playing scenarios.", + "description": "Compared with GLM-4.5, this generation brings several key improvements:\n\nLonger context window: The context window has been expanded from 128K to 200K tokens, enabling the model to handle more complex.", "context_length": 202752, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Other", - "instruct_type": null - }, - "pricing": { - "prompt": "0.00000045", - "completion": "0.0000019", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 202752, - "max_completion_tokens": null, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "include_reasoning", - "max_tokens", - "min_p", - "presence_penalty", - "reasoning", - "repetition_penalty", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_k", - "top_p" - ], - "default_parameters": { - "temperature": 0.6, - "top_p": null, - "frequency_penalty": null - } + "hugging_face_id": "" }, { "id": "anthropic/claude-sonnet-4.5", - "canonical_slug": "anthropic/claude-4.5-sonnet-20250929", - "hugging_face_id": "", "name": "Anthropic: Claude Sonnet 4.5", - "created": 1759161676, - "description": "Claude Sonnet 4.5 is Anthropic’s most advanced Sonnet model to date, optimized for real-world agents and coding workflows. It delivers state-of-the-art performance on coding benchmarks such as SWE-bench Verified, with improvements across system design, code security, and specification adherence. The model is designed for extended autonomous operation, maintaining task continuity across sessions and providing fact-based progress tracking.\n\nSonnet 4.5 also introduces stronger agentic capabilities, including improved tool orchestration, speculative parallel execution, and more efficient context and memory management. With enhanced context tracking and awareness of token usage across tool calls, it is particularly well-suited for multi-context and long-running workflows. 
Use cases span software engineering, cybersecurity, financial analysis, research agents, and other domains requiring sustained reasoning and tool use.", + "description": "Claude Sonnet 4.5 is Anthropic’s most advanced Sonnet model to date, optimized for real-world agents and coding workflows.", "context_length": 1000000, - "architecture": { - "modality": "text+image-\u003Etext", - "input_modalities": ["text", "image", "file"], - "output_modalities": ["text"], - "tokenizer": "Claude", - "instruct_type": null - }, - "pricing": { - "prompt": "0.000003", - "completion": "0.000015", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0", - "input_cache_read": "0.0000003", - "input_cache_write": "0.00000375" - }, - "top_provider": { - "context_length": 1000000, - "max_completion_tokens": 64000, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "include_reasoning", - "max_tokens", - "reasoning", - "response_format", - "stop", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_k", - "top_p" - ], - "default_parameters": { - "temperature": 1, - "top_p": 1, - "frequency_penalty": null - } + "hugging_face_id": "" }, { "id": "deepseek/deepseek-v3.2-exp", - "canonical_slug": "deepseek/deepseek-v3.2-exp", - "hugging_face_id": "deepseek-ai/DeepSeek-V3.2-Exp", "name": "DeepSeek: DeepSeek V3.2 Exp", - "created": 1759150481, - "description": "DeepSeek-V3.2-Exp is an experimental large language model released by DeepSeek as an intermediate step between V3.1 and future architectures. It introduces DeepSeek Sparse Attention (DSA), a fine-grained sparse attention mechanism designed to improve training and inference efficiency in long-context scenarios while maintaining output quality. Users can control the reasoning behaviour with the `reasoning` `enabled` boolean. [Learn more in our docs](https://openrouter.ai/docs/use-cases/reasoning-tokens#enable-reasoning-with-default-config)\n\nThe model was trained under conditions aligned with V3.1-Terminus to enable direct comparison. Benchmarking shows performance roughly on par with V3.1 across reasoning, coding, and agentic tool-use tasks, with minor tradeoffs and gains depending on the domain. 
This release focuses on validating architectural optimizations for extended context lengths rather than advancing raw task accuracy, making it primarily a research-oriented model for exploring efficient transformer designs.", + "description": "DeepSeek-V3.2-Exp is an experimental large language model released by DeepSeek as an intermediate step between V3.1 and future architectures.", "context_length": 163840, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "DeepSeek", - "instruct_type": "deepseek-v3.1" - }, - "pricing": { - "prompt": "0.00000027", - "completion": "0.0000004", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 163840, - "max_completion_tokens": null, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "include_reasoning", - "logprobs", - "max_tokens", - "min_p", - "presence_penalty", - "reasoning", - "repetition_penalty", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_k", - "top_logprobs", - "top_p" - ], - "default_parameters": { - "temperature": 0.6, - "top_p": 0.95, - "frequency_penalty": null - } + "hugging_face_id": "deepseek-ai/DeepSeek-V3.2-Exp" }, { "id": "thedrummer/cydonia-24b-v4.1", - "canonical_slug": "thedrummer/cydonia-24b-v4.1", - "hugging_face_id": "thedrummer/cydonia-24b-v4.1", "name": "TheDrummer: Cydonia 24B V4.1", - "created": 1758931878, "description": "Uncensored and creative writing model based on Mistral Small 3.2 24B with good recall, prompt adherence, and intelligence.", "context_length": 131072, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Other", - "instruct_type": null - }, - "pricing": { - "prompt": "0.0000003", - "completion": "0.0000005", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 131072, - "max_completion_tokens": 131072, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "logit_bias", - "max_tokens", - "min_p", - "presence_penalty", - "repetition_penalty", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "top_k", - "top_p" - ], - "default_parameters": { - "temperature": null, - "top_p": null, - "frequency_penalty": null - } + "hugging_face_id": "thedrummer/cydonia-24b-v4.1" }, { "id": "relace/relace-apply-3", - "canonical_slug": "relace/relace-apply-3", - "hugging_face_id": "", "name": "Relace: Relace Apply 3", - "created": 1758891572, - "description": "Relace Apply 3 is a specialized code-patching LLM that merges AI-suggested edits straight into your source files. It can apply updates from GPT-4o, Claude, and others into your files at 10,000 tokens/sec on average.\n\nThe model requires the prompt to be in the following format: \n\u003Cinstruction\u003E{instruction}\u003C/instruction\u003E\n\u003Ccode\u003E{initial_code}\u003C/code\u003E\n\u003Cupdate\u003E{edit_snippet}\u003C/update\u003E\n\nZero Data Retention is enabled for Relace. 
Learn more about this model in their [documentation](https://docs.relace.ai/api-reference/instant-apply/apply)", + "description": "Relace Apply 3 is a specialized code-patching LLM that merges AI-suggested edits straight into your source files.", "context_length": 256000, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Other", - "instruct_type": null - }, - "pricing": { - "prompt": "0.00000085", - "completion": "0.00000125", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 256000, - "max_completion_tokens": 128000, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": ["max_tokens", "seed", "stop"], - "default_parameters": { - "temperature": null, - "top_p": null, - "frequency_penalty": null - } + "hugging_face_id": "" }, { "id": "google/gemini-2.5-flash-preview-09-2025", - "canonical_slug": "google/gemini-2.5-flash-preview-09-2025", - "hugging_face_id": "", "name": "Google: Gemini 2.5 Flash Preview 09-2025", - "created": 1758820178, - "description": "Gemini 2.5 Flash Preview September 2025 Checkpoint is Google's state-of-the-art workhorse model, specifically designed for advanced reasoning, coding, mathematics, and scientific tasks. It includes built-in \"thinking\" capabilities, enabling it to provide responses with greater accuracy and nuanced context handling. \n\nAdditionally, Gemini 2.5 Flash is configurable through the \"max tokens for reasoning\" parameter, as described in the documentation (https://openrouter.ai/docs/use-cases/reasoning-tokens#max-tokens-for-reasoning).", + "description": "Gemini 2.5 Flash Preview September 2025 Checkpoint is Google's state-of-the-art workhorse model, specifically designed for advanced reasoning, coding, mathematics, and scientific tasks.", "context_length": 1048576, - "architecture": { - "modality": "text+image-\u003Etext", - "input_modalities": ["image", "file", "text", "audio", "video"], - "output_modalities": ["text"], - "tokenizer": "Gemini", - "instruct_type": null - }, - "pricing": { - "prompt": "0.0000003", - "completion": "0.0000025", - "request": "0", - "image": "0.001238", - "audio": "0.000001", - "web_search": "0", - "internal_reasoning": "0", - "input_cache_read": "0.000000075", - "input_cache_write": "0.0000003833" - }, - "top_provider": { - "context_length": 1048576, - "max_completion_tokens": 65536, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "include_reasoning", - "max_tokens", - "reasoning", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_p" - ], - "default_parameters": { - "temperature": null, - "top_p": null, - "frequency_penalty": null - } + "hugging_face_id": "" }, { "id": "google/gemini-2.5-flash-lite-preview-09-2025", - "canonical_slug": "google/gemini-2.5-flash-lite-preview-09-2025", - "hugging_face_id": "", "name": "Google: Gemini 2.5 Flash Lite Preview 09-2025", - "created": 1758819686, - "description": "Gemini 2.5 Flash-Lite is a lightweight reasoning model in the Gemini 2.5 family, optimized for ultra-low latency and cost efficiency. It offers improved throughput, faster token generation, and better performance across common benchmarks compared to earlier Flash models. By default, \"thinking\" (i.e. 
multi-pass reasoning) is disabled to prioritize speed, but developers can enable it via the [Reasoning API parameter](https://openrouter.ai/docs/use-cases/reasoning-tokens) to selectively trade off cost for intelligence. ", + "description": "Gemini 2.5 Flash-Lite is a lightweight reasoning model in the Gemini 2.5 family, optimized for ultra-low latency and cost efficiency.", "context_length": 1048576, - "architecture": { - "modality": "text+image-\u003Etext", - "input_modalities": ["text", "image", "file", "audio", "video"], - "output_modalities": ["text"], - "tokenizer": "Gemini", - "instruct_type": null - }, - "pricing": { - "prompt": "0.0000001", - "completion": "0.0000004", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 1048576, - "max_completion_tokens": 65536, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "include_reasoning", - "max_tokens", - "reasoning", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_p" - ], - "default_parameters": { - "temperature": null, - "top_p": null, - "frequency_penalty": null - } + "hugging_face_id": "" }, { "id": "qwen/qwen3-vl-235b-a22b-thinking", - "canonical_slug": "qwen/qwen3-vl-235b-a22b-thinking", - "hugging_face_id": "Qwen/Qwen3-VL-235B-A22B-Thinking", "name": "Qwen: Qwen3 VL 235B A22B Thinking", - "created": 1758668690, - "description": "Qwen3-VL-235B-A22B Thinking is a multimodal model that unifies strong text generation with visual understanding across images and video. The Thinking model is optimized for multimodal reasoning in STEM and math. The series emphasizes robust perception (recognition of diverse real-world and synthetic categories), spatial understanding (2D/3D grounding), and long-form visual comprehension, with competitive results on public multimodal benchmarks for both perception and reasoning.\n\nBeyond analysis, Qwen3-VL supports agentic interaction and tool use: it can follow complex instructions over multi-image, multi-turn dialogues; align text to video timelines for precise temporal queries; and operate GUI elements for automation tasks. The models also enable visual coding workflows, turning sketches or mockups into code and assisting with UI debugging, while maintaining strong text-only performance comparable to the flagship Qwen3 language models. 
This makes Qwen3-VL suitable for production scenarios spanning document AI, multilingual OCR, software/UI assistance, spatial/embodied tasks, and research on vision-language agents.", + "description": "Qwen3-VL-235B-A22B Thinking is a multimodal model that unifies strong text generation with visual understanding across images and video.", "context_length": 262144, - "architecture": { - "modality": "text+image-\u003Etext", - "input_modalities": ["text", "image"], - "output_modalities": ["text"], - "tokenizer": "Qwen3", - "instruct_type": null - }, - "pricing": { - "prompt": "0.0000003", - "completion": "0.0000012", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 262144, - "max_completion_tokens": 262144, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "include_reasoning", - "max_tokens", - "presence_penalty", - "reasoning", - "repetition_penalty", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_k", - "top_p" - ], - "default_parameters": { - "temperature": 0.8, - "top_p": 0.95, - "frequency_penalty": null - } + "hugging_face_id": "Qwen/Qwen3-VL-235B-A22B-Thinking" }, { "id": "qwen/qwen3-vl-235b-a22b-instruct", - "canonical_slug": "qwen/qwen3-vl-235b-a22b-instruct", - "hugging_face_id": "Qwen/Qwen3-VL-235B-A22B-Instruct", "name": "Qwen: Qwen3 VL 235B A22B Instruct", - "created": 1758668687, - "description": "Qwen3-VL-235B-A22B Instruct is an open-weight multimodal model that unifies strong text generation with visual understanding across images and video. The Instruct model targets general vision-language use (VQA, document parsing, chart/table extraction, multilingual OCR). The series emphasizes robust perception (recognition of diverse real-world and synthetic categories), spatial understanding (2D/3D grounding), and long-form visual comprehension, with competitive results on public multimodal benchmarks for both perception and reasoning.\n\nBeyond analysis, Qwen3-VL supports agentic interaction and tool use: it can follow complex instructions over multi-image, multi-turn dialogues; align text to video timelines for precise temporal queries; and operate GUI elements for automation tasks. The models also enable visual coding workflows—turning sketches or mockups into code and assisting with UI debugging—while maintaining strong text-only performance comparable to the flagship Qwen3 language models. 
This makes Qwen3-VL suitable for production scenarios spanning document AI, multilingual OCR, software/UI assistance, spatial/embodied tasks, and research on vision-language agents.", + "description": "Qwen3-VL-235B-A22B Instruct is an open-weight multimodal model that unifies strong text generation with visual understanding across images and video.", "context_length": 262144, - "architecture": { - "modality": "text+image-\u003Etext", - "input_modalities": ["text", "image"], - "output_modalities": ["text"], - "tokenizer": "Qwen3", - "instruct_type": null - }, - "pricing": { - "prompt": "0.00000022", - "completion": "0.00000088", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 262144, - "max_completion_tokens": null, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "logit_bias", - "logprobs", - "max_tokens", - "min_p", - "presence_penalty", - "repetition_penalty", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_k", - "top_logprobs", - "top_p" - ], - "default_parameters": { - "temperature": 0.7, - "top_p": 0.8, - "frequency_penalty": null - } + "hugging_face_id": "Qwen/Qwen3-VL-235B-A22B-Instruct" }, { "id": "qwen/qwen3-max", - "canonical_slug": "qwen/qwen3-max", - "hugging_face_id": "", "name": "Qwen: Qwen3 Max", - "created": 1758662808, - "description": "Qwen3-Max is an updated release built on the Qwen3 series, offering major improvements in reasoning, instruction following, multilingual support, and long-tail knowledge coverage compared to the January 2025 version. It delivers higher accuracy in math, coding, logic, and science tasks, follows complex instructions in Chinese and English more reliably, reduces hallucinations, and produces higher-quality responses for open-ended Q&A, writing, and conversation. The model supports over 100 languages with stronger translation and commonsense reasoning, and is optimized for retrieval-augmented generation (RAG) and tool calling, though it does not include a dedicated “thinking” mode.", + "description": "Qwen3-Max is an updated release built on the Qwen3 series, offering major improvements in reasoning, instruction following, multilingual support, and long-tail knowledge coverage compared to the January 2025 version.", "context_length": 256000, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Qwen3", - "instruct_type": null - }, - "pricing": { - "prompt": "0.0000012", - "completion": "0.000006", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0", - "input_cache_read": "0.00000024" - }, - "top_provider": { - "context_length": 256000, - "max_completion_tokens": 32768, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "max_tokens", - "presence_penalty", - "response_format", - "seed", - "temperature", - "tool_choice", - "tools", - "top_p" - ], - "default_parameters": { - "temperature": 1, - "top_p": 1, - "frequency_penalty": null - } + "hugging_face_id": "" }, { "id": "qwen/qwen3-coder-plus", - "canonical_slug": "qwen/qwen3-coder-plus", - "hugging_face_id": "", "name": "Qwen: Qwen3 Coder Plus", - "created": 1758662707, - "description": "Qwen3 Coder Plus is Alibaba's proprietary version of the Open Source Qwen3 Coder 480B A35B. 
It is a powerful coding agent model specializing in autonomous programming via tool calling and environment interaction, combining coding proficiency with versatile general-purpose abilities.", + "description": "Qwen3 Coder Plus is Alibaba's proprietary version of the Open Source Qwen3 Coder 480B A35B.", "context_length": 128000, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Qwen3", - "instruct_type": null - }, - "pricing": { - "prompt": "0.000001", - "completion": "0.000005", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0", - "input_cache_read": "0.0000001" - }, - "top_provider": { - "context_length": 128000, - "max_completion_tokens": 65536, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "max_tokens", - "presence_penalty", - "response_format", - "seed", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_p" - ], - "default_parameters": { - "temperature": null, - "top_p": null, - "frequency_penalty": null - } + "hugging_face_id": "" }, { "id": "openai/gpt-5-codex", - "canonical_slug": "openai/gpt-5-codex", - "hugging_face_id": "", "name": "OpenAI: GPT-5 Codex", - "created": 1758643403, - "description": "GPT-5-Codex is a specialized version of GPT-5 optimized for software engineering and coding workflows. It is designed for both interactive development sessions and long, independent execution of complex engineering tasks. The model supports building projects from scratch, feature development, debugging, large-scale refactoring, and code review. Compared to GPT-5, Codex is more steerable, adheres closely to developer instructions, and produces cleaner, higher-quality code outputs. Reasoning effort can be adjusted with the `reasoning.effort` parameter. Read the [docs here](https://openrouter.ai/docs/use-cases/reasoning-tokens#reasoning-effort-level)\n\nCodex integrates into developer environments including the CLI, IDE extensions, GitHub, and cloud tasks. It adapts reasoning effort dynamically—providing fast responses for small tasks while sustaining extended multi-hour runs for large projects. The model is trained to perform structured code reviews, catching critical flaws by reasoning over dependencies and validating behavior against tests. It also supports multimodal inputs such as images or screenshots for UI development and integrates tool use for search, dependency installation, and environment setup. 
Codex is intended specifically for agentic coding applications.", + "description": "GPT-5-Codex is a specialized version of GPT-5 optimized for software engineering and coding workflows.", "context_length": 400000, - "architecture": { - "modality": "text+image-\u003Etext", - "input_modalities": ["text", "image"], - "output_modalities": ["text"], - "tokenizer": "GPT", - "instruct_type": null - }, - "pricing": { - "prompt": "0.00000125", - "completion": "0.00001", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0", - "input_cache_read": "0.000000125" - }, - "top_provider": { - "context_length": 400000, - "max_completion_tokens": 128000, - "is_moderated": true - }, - "per_request_limits": null, - "supported_parameters": [ - "include_reasoning", - "max_tokens", - "reasoning", - "response_format", - "seed", - "structured_outputs", - "tool_choice", - "tools" - ], - "default_parameters": { - "temperature": null, - "top_p": null, - "frequency_penalty": null - } + "hugging_face_id": "" }, { "id": "deepseek/deepseek-v3.1-terminus", - "canonical_slug": "deepseek/deepseek-v3.1-terminus", - "hugging_face_id": "deepseek-ai/DeepSeek-V3.1-Terminus", "name": "DeepSeek: DeepSeek V3.1 Terminus", - "created": 1758548275, - "description": "DeepSeek-V3.1 Terminus is an update to [DeepSeek V3.1](/deepseek/deepseek-chat-v3.1) that maintains the model's original capabilities while addressing issues reported by users, including language consistency and agent capabilities, further optimizing the model's performance in coding and search agents. It is a large hybrid reasoning model (671B parameters, 37B active) that supports both thinking and non-thinking modes. It extends the DeepSeek-V3 base with a two-phase long-context training process, reaching up to 128K tokens, and uses FP8 microscaling for efficient inference. Users can control the reasoning behaviour with the `reasoning` `enabled` boolean. [Learn more in our docs](https://openrouter.ai/docs/use-cases/reasoning-tokens#enable-reasoning-with-default-config)\n\nThe model improves tool use, code generation, and reasoning efficiency, achieving performance comparable to DeepSeek-R1 on difficult benchmarks while responding more quickly. It supports structured tool calling, code agents, and search agents, making it suitable for research, coding, and agentic workflows. 
", + "description": "DeepSeek-V3.1 Terminus is an update to [DeepSeek V3.1](/deepseek/deepseek-chat-v3.1) that maintains the model's original capabilities while addressing issues reported by users, including language.", "context_length": 163840, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "DeepSeek", - "instruct_type": "deepseek-v3.1" - }, - "pricing": { - "prompt": "0.00000023", - "completion": "0.0000009", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 163840, - "max_completion_tokens": 163840, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "include_reasoning", - "max_tokens", - "min_p", - "presence_penalty", - "reasoning", - "repetition_penalty", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_k", - "top_p" - ], - "default_parameters": { - "temperature": null, - "top_p": null, - "frequency_penalty": null - } + "hugging_face_id": "deepseek-ai/DeepSeek-V3.1-Terminus" }, { "id": "deepseek/deepseek-v3.1-terminus:exacto", - "canonical_slug": "deepseek/deepseek-v3.1-terminus", - "hugging_face_id": "deepseek-ai/DeepSeek-V3.1-Terminus", "name": "DeepSeek: DeepSeek V3.1 Terminus (exacto)", - "created": 1758548275, - "description": "DeepSeek-V3.1 Terminus is an update to [DeepSeek V3.1](/deepseek/deepseek-chat-v3.1) that maintains the model's original capabilities while addressing issues reported by users, including language consistency and agent capabilities, further optimizing the model's performance in coding and search agents. It is a large hybrid reasoning model (671B parameters, 37B active) that supports both thinking and non-thinking modes. It extends the DeepSeek-V3 base with a two-phase long-context training process, reaching up to 128K tokens, and uses FP8 microscaling for efficient inference. Users can control the reasoning behaviour with the `reasoning` `enabled` boolean. [Learn more in our docs](https://openrouter.ai/docs/use-cases/reasoning-tokens#enable-reasoning-with-default-config)\n\nThe model improves tool use, code generation, and reasoning efficiency, achieving performance comparable to DeepSeek-R1 on difficult benchmarks while responding more quickly. It supports structured tool calling, code agents, and search agents, making it suitable for research, coding, and agentic workflows. 
", + "description": "DeepSeek-V3.1 Terminus is an update to [DeepSeek V3.1](/deepseek/deepseek-chat-v3.1) that maintains the model's original capabilities while addressing issues reported by users, including language.", "context_length": 131072, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "DeepSeek", - "instruct_type": "deepseek-v3.1" - }, - "pricing": { - "prompt": "0.00000027", - "completion": "0.000001", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 131072, - "max_completion_tokens": 65536, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "include_reasoning", - "max_tokens", - "min_p", - "presence_penalty", - "reasoning", - "repetition_penalty", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_k", - "top_p" - ], - "default_parameters": { - "temperature": null, - "top_p": null, - "frequency_penalty": null - } + "hugging_face_id": "deepseek-ai/DeepSeek-V3.1-Terminus" }, { "id": "x-ai/grok-4-fast", - "canonical_slug": "x-ai/grok-4-fast", - "hugging_face_id": "", "name": "xAI: Grok 4 Fast", - "created": 1758240090, - "description": "Grok 4 Fast is xAI's latest multimodal model with SOTA cost-efficiency and a 2M token context window. It comes in two flavors: non-reasoning and reasoning. Read more about the model on xAI's [news post](http://x.ai/news/grok-4-fast). Reasoning can be enabled using the `reasoning` `enabled` parameter in the API. [Learn more in our docs](https://openrouter.ai/docs/use-cases/reasoning-tokens#controlling-reasoning-tokens)", + "description": "Grok 4 Fast is xAI's latest multimodal model with SOTA cost-efficiency and a 2M token context window.", "context_length": 2000000, - "architecture": { - "modality": "text+image-\u003Etext", - "input_modalities": ["text", "image"], - "output_modalities": ["text"], - "tokenizer": "Grok", - "instruct_type": null - }, - "pricing": { - "prompt": "0.0000002", - "completion": "0.0000005", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0", - "input_cache_read": "0.00000005" - }, - "top_provider": { - "context_length": 2000000, - "max_completion_tokens": 30000, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "include_reasoning", - "logprobs", - "max_tokens", - "reasoning", - "response_format", - "seed", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_logprobs", - "top_p" - ], - "default_parameters": { - "temperature": null, - "top_p": null, - "frequency_penalty": null - } + "hugging_face_id": "" }, { "id": "alibaba/tongyi-deepresearch-30b-a3b:free", - "canonical_slug": "alibaba/tongyi-deepresearch-30b-a3b", - "hugging_face_id": "Alibaba-NLP/Tongyi-DeepResearch-30B-A3B", "name": "Tongyi DeepResearch 30B A3B (free)", - "created": 1758210804, - "description": "Tongyi DeepResearch is an agentic large language model developed by Tongyi Lab, with 30 billion total parameters activating only 3 billion per token. It's optimized for long-horizon, deep information-seeking tasks and delivers state-of-the-art performance on benchmarks like Humanity's Last Exam, BrowserComp, BrowserComp-ZH, WebWalkerQA, GAIA, xbench-DeepSearch, and FRAMES. 
This makes it superior for complex agentic search, reasoning, and multi-step problem-solving compared to prior models.\n\nThe model includes a fully automated synthetic data pipeline for scalable pre-training, fine-tuning, and reinforcement learning. It uses large-scale continual pre-training on diverse agentic data to boost reasoning and stay fresh. It also features end-to-end on-policy RL with a customized Group Relative Policy Optimization, including token-level gradients and negative sample filtering for stable training. The model supports ReAct for core ability checks and an IterResearch-based 'Heavy' mode for max performance through test-time scaling. It's ideal for advanced research agents, tool use, and heavy inference workflows.", + "description": "Tongyi DeepResearch is an agentic large language model developed by Tongyi Lab, with 30 billion total parameters activating only 3 billion per token.", "context_length": 131072, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Other", - "instruct_type": null - }, - "pricing": { - "prompt": "0", - "completion": "0", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 131072, - "max_completion_tokens": 131072, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "include_reasoning", - "max_tokens", - "presence_penalty", - "reasoning", - "repetition_penalty", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_k", - "top_p" - ], - "default_parameters": { - "temperature": null, - "top_p": null, - "frequency_penalty": null - } + "hugging_face_id": "Alibaba-NLP/Tongyi-DeepResearch-30B-A3B" }, { "id": "alibaba/tongyi-deepresearch-30b-a3b", - "canonical_slug": "alibaba/tongyi-deepresearch-30b-a3b", - "hugging_face_id": "Alibaba-NLP/Tongyi-DeepResearch-30B-A3B", "name": "Tongyi DeepResearch 30B A3B", - "created": 1758210804, - "description": "Tongyi DeepResearch is an agentic large language model developed by Tongyi Lab, with 30 billion total parameters activating only 3 billion per token. It's optimized for long-horizon, deep information-seeking tasks and delivers state-of-the-art performance on benchmarks like Humanity's Last Exam, BrowserComp, BrowserComp-ZH, WebWalkerQA, GAIA, xbench-DeepSearch, and FRAMES. This makes it superior for complex agentic search, reasoning, and multi-step problem-solving compared to prior models.\n\nThe model includes a fully automated synthetic data pipeline for scalable pre-training, fine-tuning, and reinforcement learning. It uses large-scale continual pre-training on diverse agentic data to boost reasoning and stay fresh. It also features end-to-end on-policy RL with a customized Group Relative Policy Optimization, including token-level gradients and negative sample filtering for stable training. The model supports ReAct for core ability checks and an IterResearch-based 'Heavy' mode for max performance through test-time scaling. 
It's ideal for advanced research agents, tool use, and heavy inference workflows.", + "description": "Tongyi DeepResearch is an agentic large language model developed by Tongyi Lab, with 30 billion total parameters activating only 3 billion per token.", "context_length": 131072, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Other", - "instruct_type": null - }, - "pricing": { - "prompt": "0.00000009", - "completion": "0.0000004", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 131072, - "max_completion_tokens": 131072, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "include_reasoning", - "max_tokens", - "min_p", - "presence_penalty", - "reasoning", - "repetition_penalty", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_k", - "top_p" - ], - "default_parameters": { - "temperature": null, - "top_p": null, - "frequency_penalty": null - } + "hugging_face_id": "Alibaba-NLP/Tongyi-DeepResearch-30B-A3B" }, { "id": "qwen/qwen3-coder-flash", - "canonical_slug": "qwen/qwen3-coder-flash", - "hugging_face_id": "", "name": "Qwen: Qwen3 Coder Flash", - "created": 1758115536, - "description": "Qwen3 Coder Flash is Alibaba's fast and cost efficient version of their proprietary Qwen3 Coder Plus. It is a powerful coding agent model specializing in autonomous programming via tool calling and environment interaction, combining coding proficiency with versatile general-purpose abilities.", + "description": "Qwen3 Coder Flash is Alibaba's fast and cost efficient version of their proprietary Qwen3 Coder Plus.", "context_length": 128000, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Qwen3", - "instruct_type": null - }, - "pricing": { - "prompt": "0.0000003", - "completion": "0.0000015", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0", - "input_cache_read": "0.00000008" - }, - "top_provider": { - "context_length": 128000, - "max_completion_tokens": 65536, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "max_tokens", - "presence_penalty", - "response_format", - "seed", - "temperature", - "tool_choice", - "tools", - "top_p" - ], - "default_parameters": { - "temperature": null, - "top_p": null, - "frequency_penalty": null - } + "hugging_face_id": "" }, { "id": "arcee-ai/afm-4.5b", - "canonical_slug": "arcee-ai/afm-4.5b", - "hugging_face_id": "arcee-ai/AFM-4.5B", "name": "Arcee AI: AFM 4.5B", - "created": 1758040484, - "description": "AFM-4.5B is a 4.5 billion parameter instruction-tuned language model developed by Arcee AI. The model was pretrained on approximately 8 trillion tokens, including 6.5 trillion tokens of general data and 1.5 trillion tokens with an emphasis on mathematical reasoning and code generation. 
", + "description": "AFM-4.5B is a 4.5 billion parameter instruction-tuned language model developed by Arcee AI.", "context_length": 65536, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Other", - "instruct_type": null - }, - "pricing": { - "prompt": "0.000000048", - "completion": "0.00000015", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 65536, - "max_completion_tokens": null, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "logit_bias", - "max_tokens", - "min_p", - "presence_penalty", - "repetition_penalty", - "response_format", - "stop", - "structured_outputs", - "temperature", - "top_k", - "top_p" - ], - "default_parameters": { - "temperature": null, - "top_p": null, - "frequency_penalty": null - } + "hugging_face_id": "arcee-ai/AFM-4.5B" }, { "id": "opengvlab/internvl3-78b", - "canonical_slug": "opengvlab/internvl3-78b", - "hugging_face_id": "OpenGVLab/InternVL3-78B", "name": "OpenGVLab: InternVL3 78B", - "created": 1757962555, - "description": "The InternVL3 series is an advanced multimodal large language model (MLLM). Compared to InternVL 2.5, InternVL3 demonstrates stronger multimodal perception and reasoning capabilities. \n\nIn addition, InternVL3 is benchmarked against the Qwen2.5 Chat models, whose pre-trained base models serve as the initialization for its language component. Benefiting from Native Multimodal Pre-Training, the InternVL3 series surpasses the Qwen2.5 series in overall text performance.", + "description": "The InternVL3 series is an advanced multimodal large language model (MLLM).", "context_length": 32768, - "architecture": { - "modality": "text+image-\u003Etext", - "input_modalities": ["image", "text"], - "output_modalities": ["text"], - "tokenizer": "Other", - "instruct_type": null - }, - "pricing": { - "prompt": "0.00000007", - "completion": "0.00000026", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 32768, - "max_completion_tokens": 32768, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "max_tokens", - "presence_penalty", - "repetition_penalty", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "top_k", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "OpenGVLab/InternVL3-78B" }, { "id": "qwen/qwen3-next-80b-a3b-thinking", - "canonical_slug": "qwen/qwen3-next-80b-a3b-thinking-2509", - "hugging_face_id": "Qwen/Qwen3-Next-80B-A3B-Thinking", "name": "Qwen: Qwen3 Next 80B A3B Thinking", - "created": 1757612284, - "description": "Qwen3-Next-80B-A3B-Thinking is a reasoning-first chat model in the Qwen3-Next line that outputs structured “thinking” traces by default. It’s designed for hard multi-step problems; math proofs, code synthesis/debugging, logic, and agentic planning, and reports strong results across knowledge, reasoning, coding, alignment, and multilingual evaluations. 
Compared with prior Qwen3 variants, it emphasizes stability under long chains of thought and efficient scaling during inference, and it is tuned to follow complex instructions while reducing repetitive or off-task behavior.\n\nThe model is suitable for agent frameworks and tool use (function calling), retrieval-heavy workflows, and standardized benchmarking where step-by-step solutions are required. It supports long, detailed completions and leverages throughput-oriented techniques (e.g., multi-token prediction) for faster generation. Note that it operates in thinking-only mode.", + "description": "Qwen3-Next-80B-A3B-Thinking is a reasoning-first chat model in the Qwen3-Next line that outputs structured “thinking” traces by default.", "context_length": 262144, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Qwen3", - "instruct_type": null - }, - "pricing": { - "prompt": "0.00000015", - "completion": "0.0000012", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 262144, - "max_completion_tokens": 262144, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "include_reasoning", - "logit_bias", - "max_tokens", - "min_p", - "presence_penalty", - "reasoning", - "repetition_penalty", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_k", - "top_p" - ], - "default_parameters": { - "temperature": null, - "top_p": null, - "frequency_penalty": null - } + "hugging_face_id": "Qwen/Qwen3-Next-80B-A3B-Thinking" }, { "id": "qwen/qwen3-next-80b-a3b-instruct", - "canonical_slug": "qwen/qwen3-next-80b-a3b-instruct-2509", - "hugging_face_id": "Qwen/Qwen3-Next-80B-A3B-Instruct", "name": "Qwen: Qwen3 Next 80B A3B Instruct", - "created": 1757612213, - "description": "Qwen3-Next-80B-A3B-Instruct is an instruction-tuned chat model in the Qwen3-Next series optimized for fast, stable responses without “thinking” traces. It targets complex tasks across reasoning, code generation, knowledge QA, and multilingual use, while remaining robust on alignment and formatting. Compared with prior Qwen3 instruct variants, it focuses on higher throughput and stability on ultra-long inputs and multi-turn dialogues, making it well-suited for RAG, tool use, and agentic workflows that require consistent final answers rather than visible chain-of-thought.\n\nThe model employs scaling-efficient training and decoding to improve parameter efficiency and inference speed, and has been validated on a broad set of public benchmarks where it reaches or approaches larger Qwen3 systems in several categories while outperforming earlier mid-sized baselines. 
It is best used as a general assistant, code helper, and long-context task solver in production settings where deterministic, instruction-following outputs are preferred.", + "description": "Qwen3-Next-80B-A3B-Instruct is an instruction-tuned chat model in the Qwen3-Next series optimized for fast, stable responses without “thinking” traces.", "context_length": 262144, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Qwen3", - "instruct_type": null - }, - "pricing": { - "prompt": "0.0000001", - "completion": "0.0000008", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 262144, - "max_completion_tokens": 262144, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "logit_bias", - "max_tokens", - "min_p", - "presence_penalty", - "repetition_penalty", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_k", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "Qwen/Qwen3-Next-80B-A3B-Instruct" }, { "id": "meituan/longcat-flash-chat:free", - "canonical_slug": "meituan/longcat-flash-chat", - "hugging_face_id": "meituan-longcat/LongCat-Flash-Chat", "name": "Meituan: LongCat Flash Chat (free)", - "created": 1757427658, - "description": "LongCat-Flash-Chat is a large-scale Mixture-of-Experts (MoE) model with 560B total parameters, of which 18.6B–31.3B (≈27B on average) are dynamically activated per input. It introduces a shortcut-connected MoE design to reduce communication overhead and achieve high throughput while maintaining training stability through advanced scaling strategies such as hyperparameter transfer, deterministic computation, and multi-stage optimization.\n\nThis release, LongCat-Flash-Chat, is a non-thinking foundation model optimized for conversational and agentic tasks. 
It supports long context windows up to 128K tokens and shows competitive performance across reasoning, coding, instruction following, and domain benchmarks, with particular strengths in tool use and complex multi-step interactions.", + "description": "LongCat-Flash-Chat is a large-scale Mixture-of-Experts (MoE) model with 560B total parameters, of which 18.6B–31.3B (≈27B on average) are dynamically activated per input.", "context_length": 131072, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Other", - "instruct_type": null - }, - "pricing": { - "prompt": "0", - "completion": "0", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 131072, - "max_completion_tokens": 131072, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "max_tokens", - "presence_penalty", - "repetition_penalty", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_k", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "meituan-longcat/LongCat-Flash-Chat" }, { "id": "meituan/longcat-flash-chat", - "canonical_slug": "meituan/longcat-flash-chat", - "hugging_face_id": "meituan-longcat/LongCat-Flash-Chat", "name": "Meituan: LongCat Flash Chat", - "created": 1757427658, - "description": "LongCat-Flash-Chat is a large-scale Mixture-of-Experts (MoE) model with 560B total parameters, of which 18.6B–31.3B (≈27B on average) are dynamically activated per input. It introduces a shortcut-connected MoE design to reduce communication overhead and achieve high throughput while maintaining training stability through advanced scaling strategies such as hyperparameter transfer, deterministic computation, and multi-stage optimization.\n\nThis release, LongCat-Flash-Chat, is a non-thinking foundation model optimized for conversational and agentic tasks. 
It supports long context windows up to 128K tokens and shows competitive performance across reasoning, coding, instruction following, and domain benchmarks, with particular strengths in tool use and complex multi-step interactions.", + "description": "LongCat-Flash-Chat is a large-scale Mixture-of-Experts (MoE) model with 560B total parameters, of which 18.6B–31.3B (≈27B on average) are dynamically activated per input.", "context_length": 131072, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Other", - "instruct_type": null - }, - "pricing": { - "prompt": "0.00000015", - "completion": "0.00000075", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 131072, - "max_completion_tokens": 131072, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": ["max_tokens", "temperature", "top_p"], - "default_parameters": {} + "hugging_face_id": "meituan-longcat/LongCat-Flash-Chat" }, { "id": "qwen/qwen-plus-2025-07-28", - "canonical_slug": "qwen/qwen-plus-2025-07-28", - "hugging_face_id": "", "name": "Qwen: Qwen Plus 0728", - "created": 1757347599, "description": "Qwen Plus 0728, based on the Qwen3 foundation model, is a 1 million context hybrid reasoning model with a balanced performance, speed, and cost combination.", "context_length": 1000000, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Qwen3", - "instruct_type": null - }, - "pricing": { - "prompt": "0.0000004", - "completion": "0.0000012", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 1000000, - "max_completion_tokens": 32768, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "max_tokens", - "presence_penalty", - "response_format", - "seed", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_p" - ], - "default_parameters": { - "temperature": null, - "top_p": null, - "frequency_penalty": null - } + "hugging_face_id": "" }, { "id": "qwen/qwen-plus-2025-07-28:thinking", - "canonical_slug": "qwen/qwen-plus-2025-07-28", - "hugging_face_id": "", "name": "Qwen: Qwen Plus 0728 (thinking)", - "created": 1757347599, "description": "Qwen Plus 0728, based on the Qwen3 foundation model, is a 1 million context hybrid reasoning model with a balanced performance, speed, and cost combination.", "context_length": 1000000, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Qwen3", - "instruct_type": null - }, - "pricing": { - "prompt": "0.0000004", - "completion": "0.000004", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 1000000, - "max_completion_tokens": 32768, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "include_reasoning", - "max_tokens", - "presence_penalty", - "reasoning", - "response_format", - "seed", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_p" - ], - "default_parameters": { - "temperature": null, - "top_p": null, - "frequency_penalty": null - } + "hugging_face_id": "" }, { "id": "nvidia/nemotron-nano-9b-v2:free", - "canonical_slug": "nvidia/nemotron-nano-9b-v2", - "hugging_face_id": 
"nvidia/NVIDIA-Nemotron-Nano-9B-v2", "name": "NVIDIA: Nemotron Nano 9B V2 (free)", - "created": 1757106807, - "description": "NVIDIA-Nemotron-Nano-9B-v2 is a large language model (LLM) trained from scratch by NVIDIA, and designed as a unified model for both reasoning and non-reasoning tasks. It responds to user queries and tasks by first generating a reasoning trace and then concluding with a final response. \n\nThe model's reasoning capabilities can be controlled via a system prompt. If the user prefers the model to provide its final answer without intermediate reasoning traces, it can be configured to do so.", + "description": "NVIDIA-Nemotron-Nano-9B-v2 is a large language model (LLM) trained from scratch by NVIDIA, and designed as a unified model for both reasoning and non-reasoning tasks.", "context_length": 128000, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Other", - "instruct_type": null - }, - "pricing": { - "prompt": "0", - "completion": "0", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 128000, - "max_completion_tokens": null, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "include_reasoning", - "reasoning", - "response_format", - "structured_outputs", - "tool_choice", - "tools" - ], - "default_parameters": {} + "hugging_face_id": "nvidia/NVIDIA-Nemotron-Nano-9B-v2" }, { "id": "nvidia/nemotron-nano-9b-v2", - "canonical_slug": "nvidia/nemotron-nano-9b-v2", - "hugging_face_id": "nvidia/NVIDIA-Nemotron-Nano-9B-v2", "name": "NVIDIA: Nemotron Nano 9B V2", - "created": 1757106807, - "description": "NVIDIA-Nemotron-Nano-9B-v2 is a large language model (LLM) trained from scratch by NVIDIA, and designed as a unified model for both reasoning and non-reasoning tasks. It responds to user queries and tasks by first generating a reasoning trace and then concluding with a final response. \n\nThe model's reasoning capabilities can be controlled via a system prompt. 
If the user prefers the model to provide its final answer without intermediate reasoning traces, it can be configured to do so.", + "description": "NVIDIA-Nemotron-Nano-9B-v2 is a large language model (LLM) trained from scratch by NVIDIA, and designed as a unified model for both reasoning and non-reasoning tasks.", "context_length": 131072, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Other", - "instruct_type": null - }, - "pricing": { - "prompt": "0.00000004", - "completion": "0.00000016", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 131072, - "max_completion_tokens": null, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "include_reasoning", - "logit_bias", - "max_tokens", - "min_p", - "presence_penalty", - "reasoning", - "repetition_penalty", - "response_format", - "seed", - "stop", - "temperature", - "tool_choice", - "tools", - "top_k", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "nvidia/NVIDIA-Nemotron-Nano-9B-v2" }, { "id": "moonshotai/kimi-k2-0905", - "canonical_slug": "moonshotai/kimi-k2-0905", - "hugging_face_id": "moonshotai/Kimi-K2-Instruct-0905", "name": "MoonshotAI: Kimi K2 0905", - "created": 1757021147, - "description": "Kimi K2 0905 is the September update of [Kimi K2 0711](moonshotai/kimi-k2). It is a large-scale Mixture-of-Experts (MoE) language model developed by Moonshot AI, featuring 1 trillion total parameters with 32 billion active per forward pass. It supports long-context inference up to 256k tokens, extended from the previous 128k.\n\nThis update improves agentic coding with higher accuracy and better generalization across scaffolds, and enhances frontend coding with more aesthetic and functional outputs for web, 3D, and related tasks. Kimi K2 is optimized for agentic capabilities, including advanced tool use, reasoning, and code synthesis. It excels across coding (LiveCodeBench, SWE-bench), reasoning (ZebraLogic, GPQA), and tool-use (Tau2, AceBench) benchmarks. 
The model is trained with a novel stack incorporating the MuonClip optimizer for stable large-scale MoE training.", + "description": "Kimi K2 0905 is the September update of [Kimi K2 0711](moonshotai/kimi-k2).", "context_length": 262144, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Other", - "instruct_type": null - }, - "pricing": { - "prompt": "0.00000039", - "completion": "0.0000019", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 262144, - "max_completion_tokens": 262144, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "logit_bias", - "logprobs", - "max_tokens", - "min_p", - "presence_penalty", - "repetition_penalty", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_k", - "top_logprobs", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "moonshotai/Kimi-K2-Instruct-0905" }, { "id": "moonshotai/kimi-k2-0905:exacto", - "canonical_slug": "moonshotai/kimi-k2-0905", - "hugging_face_id": "moonshotai/Kimi-K2-Instruct-0905", "name": "MoonshotAI: Kimi K2 0905 (exacto)", - "created": 1757021147, - "description": "Kimi K2 0905 is the September update of [Kimi K2 0711](moonshotai/kimi-k2). It is a large-scale Mixture-of-Experts (MoE) language model developed by Moonshot AI, featuring 1 trillion total parameters with 32 billion active per forward pass. It supports long-context inference up to 256k tokens, extended from the previous 128k.\n\nThis update improves agentic coding with higher accuracy and better generalization across scaffolds, and enhances frontend coding with more aesthetic and functional outputs for web, 3D, and related tasks. Kimi K2 is optimized for agentic capabilities, including advanced tool use, reasoning, and code synthesis. It excels across coding (LiveCodeBench, SWE-bench), reasoning (ZebraLogic, GPQA), and tool-use (Tau2, AceBench) benchmarks. The model is trained with a novel stack incorporating the MuonClip optimizer for stable large-scale MoE training.", + "description": "Kimi K2 0905 is the September update of [Kimi K2 0711](moonshotai/kimi-k2).", "context_length": 262144, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Other", - "instruct_type": null - }, - "pricing": { - "prompt": "0.0000006", - "completion": "0.0000025", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 262144, - "max_completion_tokens": null, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "max_tokens", - "presence_penalty", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "moonshotai/Kimi-K2-Instruct-0905" }, { "id": "deepcogito/cogito-v2-preview-llama-70b", - "canonical_slug": "deepcogito/cogito-v2-preview-llama-70b", - "hugging_face_id": "deepcogito/cogito-v2-preview-llama-70B", "name": "Deep Cogito: Cogito V2 Preview Llama 70B", - "created": 1756831784, - "description": "Cogito v2 70B is a dense hybrid reasoning model that combines direct answering capabilities with advanced self-reflection. 
Built with iterative policy improvement, it delivers strong performance across reasoning tasks while maintaining efficiency through shorter reasoning chains and improved intuition.", + "description": "Cogito v2 70B is a dense hybrid reasoning model that combines direct answering capabilities with advanced self-reflection.", "context_length": 32768, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Llama3", - "instruct_type": null - }, - "pricing": { - "prompt": "0.00000088", - "completion": "0.00000088", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 32768, - "max_completion_tokens": null, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "include_reasoning", - "logit_bias", - "max_tokens", - "min_p", - "presence_penalty", - "reasoning", - "repetition_penalty", - "response_format", - "stop", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_k", - "top_p" - ], - "default_parameters": { - "temperature": null, - "top_p": null, - "frequency_penalty": null - } + "hugging_face_id": "deepcogito/cogito-v2-preview-llama-70B" }, { "id": "deepcogito/cogito-v2-preview-llama-109b-moe", - "canonical_slug": "deepcogito/cogito-v2-preview-llama-109b-moe", - "hugging_face_id": "deepcogito/cogito-v2-preview-llama-109B-MoE", "name": "Cogito V2 Preview Llama 109B", - "created": 1756831568, - "description": "An instruction-tuned, hybrid-reasoning Mixture-of-Experts model built on Llama-4-Scout-17B-16E. Cogito v2 can answer directly or engage an extended “thinking” phase, with alignment guided by Iterated Distillation & Amplification (IDA). It targets coding, STEM, instruction following, and general helpfulness, with stronger multilingual, tool-calling, and reasoning performance than size-equivalent baselines. The model supports long-context use (up to 10M tokens) and standard Transformers workflows. Users can control the reasoning behaviour with the `reasoning` `enabled` boolean. 
[Learn more in our docs](https://openrouter.ai/docs/use-cases/reasoning-tokens#enable-reasoning-with-default-config)", + "description": "An instruction-tuned, hybrid-reasoning Mixture-of-Experts model built on Llama-4-Scout-17B-16E.", "context_length": 32767, - "architecture": { - "modality": "text+image-\u003Etext", - "input_modalities": ["image", "text"], - "output_modalities": ["text"], - "tokenizer": "Llama4", - "instruct_type": null - }, - "pricing": { - "prompt": "0.00000018", - "completion": "0.00000059", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 32767, - "max_completion_tokens": null, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "include_reasoning", - "logit_bias", - "max_tokens", - "min_p", - "presence_penalty", - "reasoning", - "repetition_penalty", - "stop", - "temperature", - "tool_choice", - "tools", - "top_k", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "deepcogito/cogito-v2-preview-llama-109B-MoE" }, { "id": "deepcogito/cogito-v2-preview-deepseek-671b", - "canonical_slug": "deepcogito/cogito-v2-preview-deepseek-671b", - "hugging_face_id": "deepcogito/cogito-v2-preview-deepseek-671B-MoE", "name": "Deep Cogito: Cogito V2 Preview Deepseek 671B", - "created": 1756830949, - "description": "Cogito v2 is a multilingual, instruction-tuned Mixture of Experts (MoE) large language model with 671 billion parameters. It supports both standard and reasoning-based generation modes. The model introduces hybrid reasoning via Iterated Distillation and Amplification (IDA)—an iterative self-improvement strategy designed to scale alignment with general intelligence. Cogito v2 has been optimized for STEM, programming, instruction following, and tool use. It supports 128k context length and offers strong performance in both multilingual and code-heavy environments. Users can control the reasoning behaviour with the `reasoning` `enabled` boolean. [Learn more in our docs](https://openrouter.ai/docs/use-cases/reasoning-tokens#enable-reasoning-with-default-config)", + "description": "Cogito v2 is a multilingual, instruction-tuned Mixture of Experts (MoE) large language model with 671 billion parameters.", "context_length": 163840, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "DeepSeek", - "instruct_type": null - }, - "pricing": { - "prompt": "0.00000125", - "completion": "0.00000125", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 163840, - "max_completion_tokens": null, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "include_reasoning", - "logit_bias", - "max_tokens", - "min_p", - "presence_penalty", - "reasoning", - "repetition_penalty", - "stop", - "temperature", - "top_k", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "deepcogito/cogito-v2-preview-deepseek-671B-MoE" }, { "id": "stepfun-ai/step3", - "canonical_slug": "stepfun-ai/step3", - "hugging_face_id": "stepfun-ai/step3", "name": "StepFun: Step3", - "created": 1756415375, - "description": "Step3 is a cutting-edge multimodal reasoning model—built on a Mixture-of-Experts architecture with 321B total parameters and 38B active. 
It is designed end-to-end to minimize decoding costs while delivering top-tier performance in vision–language reasoning. Through the co-design of Multi-Matrix Factorization Attention (MFA) and Attention-FFN Disaggregation (AFD), Step3 maintains exceptional efficiency across both flagship and low-end accelerators.", + "description": "Step3 is a cutting-edge multimodal reasoning model—built on a Mixture-of-Experts architecture with 321B total parameters and 38B active.", "context_length": 65536, - "architecture": { - "modality": "text+image-\u003Etext", - "input_modalities": ["image", "text"], - "output_modalities": ["text"], - "tokenizer": "Other", - "instruct_type": null - }, - "pricing": { - "prompt": "0.00000057", - "completion": "0.00000142", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 65536, - "max_completion_tokens": 65536, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "include_reasoning", - "reasoning", - "response_format", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_k", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "stepfun-ai/step3" }, { "id": "qwen/qwen3-30b-a3b-thinking-2507", - "canonical_slug": "qwen/qwen3-30b-a3b-thinking-2507", - "hugging_face_id": "Qwen/Qwen3-30B-A3B-Thinking-2507", "name": "Qwen: Qwen3 30B A3B Thinking 2507", - "created": 1756399192, - "description": "Qwen3-30B-A3B-Thinking-2507 is a 30B parameter Mixture-of-Experts reasoning model optimized for complex tasks requiring extended multi-step thinking. The model is designed specifically for “thinking mode,” where internal reasoning traces are separated from final answers.\n\nCompared to earlier Qwen3-30B releases, this version improves performance across logical reasoning, mathematics, science, coding, and multilingual benchmarks. It also demonstrates stronger instruction following, tool use, and alignment with human preferences. With higher reasoning efficiency and extended output budgets, it is best suited for advanced research, competitive problem solving, and agentic applications requiring structured long-context reasoning.", + "description": "Qwen3-30B-A3B-Thinking-2507 is a 30B parameter Mixture-of-Experts reasoning model optimized for complex tasks requiring extended multi-step thinking.", "context_length": 262144, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Qwen3", - "instruct_type": null - }, - "pricing": { - "prompt": "0.00000009", - "completion": "0.0000003", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 262144, - "max_completion_tokens": 131072, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "include_reasoning", - "max_tokens", - "presence_penalty", - "reasoning", - "repetition_penalty", - "response_format", - "seed", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_k", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "Qwen/Qwen3-30B-A3B-Thinking-2507" }, { "id": "x-ai/grok-code-fast-1", - "canonical_slug": "x-ai/grok-code-fast-1", - "hugging_face_id": "", "name": "xAI: Grok Code Fast 1", - "created": 1756238927, - "description": "Grok Code Fast 1 is a speedy and economical reasoning model that excels at agentic coding. 
With reasoning traces visible in the response, developers can steer Grok Code for high-quality work flows.", + "description": "Grok Code Fast 1 is a speedy and economical reasoning model that excels at agentic coding.", "context_length": 256000, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Grok", - "instruct_type": null - }, - "pricing": { - "prompt": "0.0000002", - "completion": "0.0000015", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0", - "input_cache_read": "0.00000002" - }, - "top_provider": { - "context_length": 256000, - "max_completion_tokens": 10000, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "include_reasoning", - "logprobs", - "max_tokens", - "reasoning", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_logprobs", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "" }, { "id": "nousresearch/hermes-4-70b", - "canonical_slug": "nousresearch/hermes-4-70b", - "hugging_face_id": "NousResearch/Hermes-4-70B", "name": "Nous: Hermes 4 70B", - "created": 1756236182, - "description": "Hermes 4 70B is a hybrid reasoning model from Nous Research, built on Meta-Llama-3.1-70B. It introduces the same hybrid mode as the larger 405B release, allowing the model to either respond directly or generate explicit \u003Cthink\u003E...\u003C/think\u003E reasoning traces before answering. Users can control the reasoning behaviour with the `reasoning` `enabled` boolean. [Learn more in our docs](https://openrouter.ai/docs/use-cases/reasoning-tokens#enable-reasoning-with-default-config)\n\nThis 70B variant is trained with the expanded post-training corpus (~60B tokens) emphasizing verified reasoning data, leading to improvements in mathematics, coding, STEM, logic, and structured outputs while maintaining general assistant performance. It supports JSON mode, schema adherence, function calling, and tool use, and is designed for greater steerability with reduced refusal rates.", + "description": "Hermes 4 70B is a hybrid reasoning model from Nous Research, built on Meta-Llama-3.1-70B.", "context_length": 131072, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Llama3", - "instruct_type": null - }, - "pricing": { - "prompt": "0.00000011", - "completion": "0.00000038", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 131072, - "max_completion_tokens": 131072, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "include_reasoning", - "max_tokens", - "presence_penalty", - "reasoning", - "repetition_penalty", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_k", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "NousResearch/Hermes-4-70B" }, { "id": "nousresearch/hermes-4-405b", - "canonical_slug": "nousresearch/hermes-4-405b", - "hugging_face_id": "NousResearch/Hermes-4-405B", "name": "Nous: Hermes 4 405B", - "created": 1756235463, - "description": "Hermes 4 is a large-scale reasoning model built on Meta-Llama-3.1-405B and released by Nous Research. 
It introduces a hybrid reasoning mode, where the model can choose to deliberate internally with \u003Cthink\u003E...\u003C/think\u003E traces or respond directly, offering flexibility between speed and depth. Users can control the reasoning behaviour with the `reasoning` `enabled` boolean. [Learn more in our docs](https://openrouter.ai/docs/use-cases/reasoning-tokens#enable-reasoning-with-default-config)\n\nThe model is instruction-tuned with an expanded post-training corpus (~60B tokens) emphasizing reasoning traces, improving performance in math, code, STEM, and logical reasoning, while retaining broad assistant utility. It also supports structured outputs, including JSON mode, schema adherence, function calling, and tool use. Hermes 4 is trained for steerability, lower refusal rates, and alignment toward neutral, user-directed behavior.", + "description": "Hermes 4 is a large-scale reasoning model built on Meta-Llama-3.1-405B and released by Nous Research.", "context_length": 131072, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Other", - "instruct_type": null - }, - "pricing": { - "prompt": "0.0000003", - "completion": "0.0000012", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 131072, - "max_completion_tokens": 131072, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "include_reasoning", - "max_tokens", - "presence_penalty", - "reasoning", - "repetition_penalty", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_k", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "NousResearch/Hermes-4-405B" }, { "id": "google/gemini-2.5-flash-image-preview", - "canonical_slug": "google/gemini-2.5-flash-image-preview", - "hugging_face_id": "", "name": "Google: Gemini 2.5 Flash Image Preview (Nano Banana)", - "created": 1756218977, - "description": "Gemini 2.5 Flash Image Preview, a.k.a. \"Nano Banana,\" is a state of the art image generation model with contextual understanding. It is capable of image generation, edits, and multi-turn conversations.", + "description": "Gemini 2.5 Flash Image Preview, a.k.a. \"Nano Banana,\" is a state of the art image generation model with contextual understanding.", "context_length": 32768, - "architecture": { - "modality": "text+image-\u003Etext+image", - "input_modalities": ["image", "text"], - "output_modalities": ["image", "text"], - "tokenizer": "Gemini", - "instruct_type": null - }, - "pricing": { - "prompt": "0.0000003", - "completion": "0.0000025", - "request": "0", - "image": "0.001238", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 32768, - "max_completion_tokens": 32768, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "max_tokens", - "response_format", - "seed", - "structured_outputs", - "temperature", - "top_p" - ], - "default_parameters": { - "temperature": null, - "top_p": null, - "frequency_penalty": null - } + "hugging_face_id": "" }, { "id": "deepseek/deepseek-chat-v3.1:free", - "canonical_slug": "deepseek/deepseek-chat-v3.1", - "hugging_face_id": "deepseek-ai/DeepSeek-V3.1", "name": "DeepSeek: DeepSeek V3.1 (free)", - "created": 1755779628, - "description": "DeepSeek-V3.1 is a large hybrid reasoning model (671B parameters, 37B active) that supports both thinking and non-thinking modes via prompt templates. 
It extends the DeepSeek-V3 base with a two-phase long-context training process, reaching up to 128K tokens, and uses FP8 microscaling for efficient inference. Users can control the reasoning behaviour with the `reasoning` `enabled` boolean. [Learn more in our docs](https://openrouter.ai/docs/use-cases/reasoning-tokens#enable-reasoning-with-default-config)\n\nThe model improves tool use, code generation, and reasoning efficiency, achieving performance comparable to DeepSeek-R1 on difficult benchmarks while responding more quickly. It supports structured tool calling, code agents, and search agents, making it suitable for research, coding, and agentic workflows. \n\nIt succeeds the [DeepSeek V3-0324](/deepseek/deepseek-chat-v3-0324) model and performs well on a variety of tasks.", + "description": "DeepSeek-V3.1 is a large hybrid reasoning model (671B parameters, 37B active) that supports both thinking and non-thinking modes via prompt templates.", "context_length": 163800, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "DeepSeek", - "instruct_type": "deepseek-v3.1" - }, - "pricing": { - "prompt": "0", - "completion": "0", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 163800, - "max_completion_tokens": null, - "is_moderated": true - }, - "per_request_limits": null, - "supported_parameters": [ - "include_reasoning", - "max_tokens", - "reasoning", - "seed", - "stop", - "temperature" - ], - "default_parameters": {} + "hugging_face_id": "deepseek-ai/DeepSeek-V3.1" }, { "id": "deepseek/deepseek-chat-v3.1", - "canonical_slug": "deepseek/deepseek-chat-v3.1", - "hugging_face_id": "deepseek-ai/DeepSeek-V3.1", "name": "DeepSeek: DeepSeek V3.1", - "created": 1755779628, - "description": "DeepSeek-V3.1 is a large hybrid reasoning model (671B parameters, 37B active) that supports both thinking and non-thinking modes via prompt templates. It extends the DeepSeek-V3 base with a two-phase long-context training process, reaching up to 128K tokens, and uses FP8 microscaling for efficient inference. Users can control the reasoning behaviour with the `reasoning` `enabled` boolean. [Learn more in our docs](https://openrouter.ai/docs/use-cases/reasoning-tokens#enable-reasoning-with-default-config)\n\nThe model improves tool use, code generation, and reasoning efficiency, achieving performance comparable to DeepSeek-R1 on difficult benchmarks while responding more quickly. It supports structured tool calling, code agents, and search agents, making it suitable for research, coding, and agentic workflows. 
\n\nIt succeeds the [DeepSeek V3-0324](/deepseek/deepseek-chat-v3-0324) model and performs well on a variety of tasks.", + "description": "DeepSeek-V3.1 is a large hybrid reasoning model (671B parameters, 37B active) that supports both thinking and non-thinking modes via prompt templates.", "context_length": 163840, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "DeepSeek", - "instruct_type": "deepseek-v3.1" - }, - "pricing": { - "prompt": "0.0000002", - "completion": "0.0000008", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 163840, - "max_completion_tokens": 163840, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "include_reasoning", - "logit_bias", - "logprobs", - "max_tokens", - "min_p", - "presence_penalty", - "reasoning", - "repetition_penalty", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_k", - "top_logprobs", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "deepseek-ai/DeepSeek-V3.1" }, { "id": "openai/gpt-4o-audio-preview", - "canonical_slug": "openai/gpt-4o-audio-preview", - "hugging_face_id": "", "name": "OpenAI: GPT-4o Audio", - "created": 1755233061, - "description": "The gpt-4o-audio-preview model adds support for audio inputs as prompts. This enhancement allows the model to detect nuances within audio recordings and add depth to generated user experiences. Audio outputs are currently not supported. Audio tokens are priced at $40 per million input audio tokens.", + "description": "The gpt-4o-audio-preview model adds support for audio inputs as prompts.", "context_length": 128000, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["audio", "text"], - "output_modalities": ["text"], - "tokenizer": "GPT", - "instruct_type": null - }, - "pricing": { - "prompt": "0.0000025", - "completion": "0.00001", - "request": "0", - "image": "0", - "audio": "0.00004", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 128000, - "max_completion_tokens": 16384, - "is_moderated": true - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "logit_bias", - "logprobs", - "max_tokens", - "presence_penalty", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_logprobs", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "" }, { "id": "mistralai/mistral-medium-3.1", - "canonical_slug": "mistralai/mistral-medium-3.1", - "hugging_face_id": "", "name": "Mistral: Mistral Medium 3.1", - "created": 1755095639, - "description": "Mistral Medium 3.1 is an updated version of Mistral Medium 3, which is a high-performance enterprise-grade language model designed to deliver frontier-level capabilities at significantly reduced operational cost. It balances state-of-the-art reasoning and multimodal performance with 8× lower cost compared to traditional large models, making it suitable for scalable deployments across professional and industrial use cases.\n\nThe model excels in domains such as coding, STEM reasoning, and enterprise adaptation. It supports hybrid, on-prem, and in-VPC deployments and is optimized for integration into custom workflows. 
Mistral Medium 3.1 offers competitive accuracy relative to larger models like Claude Sonnet 3.5/3.7, Llama 4 Maverick, and Command R+, while maintaining broad compatibility across cloud environments.", + "description": "Mistral Medium 3.1 is an updated version of Mistral Medium 3, which is a high-performance enterprise-grade language model designed to deliver frontier-level capabilities at significantly reduced operational cost.", "context_length": 131072, - "architecture": { - "modality": "text+image-\u003Etext", - "input_modalities": ["text", "image"], - "output_modalities": ["text"], - "tokenizer": "Mistral", - "instruct_type": null - }, - "pricing": { - "prompt": "0.0000004", - "completion": "0.000002", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 131072, - "max_completion_tokens": null, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "max_tokens", - "presence_penalty", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_p" - ], - "default_parameters": { - "temperature": 0.3 - } + "hugging_face_id": "" }, { "id": "baidu/ernie-4.5-21b-a3b", - "canonical_slug": "baidu/ernie-4.5-21b-a3b", - "hugging_face_id": "baidu/ERNIE-4.5-21B-A3B-PT", "name": "Baidu: ERNIE 4.5 21B A3B", - "created": 1755034167, - "description": "A sophisticated text-based Mixture-of-Experts (MoE) model featuring 21B total parameters with 3B activated per token, delivering exceptional multimodal understanding and generation through heterogeneous MoE structures and modality-isolated routing. Supporting an extensive 131K token context length, the model achieves efficient inference via multi-expert parallel collaboration and quantization, while advanced post-training techniques including SFT, DPO, and UPO ensure optimized performance across diverse applications with specialized routing and balancing losses for superior task handling.", + "description": "A sophisticated text-based Mixture-of-Experts (MoE) model featuring 21B total parameters with 3B activated per token, delivering exceptional multimodal understanding and generation through heterogeneous MoE structures and modality-isolated routing.", "context_length": 120000, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Other", - "instruct_type": null - }, - "pricing": { - "prompt": "0.00000007", - "completion": "0.00000028", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 120000, - "max_completion_tokens": 8000, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "max_tokens", - "presence_penalty", - "repetition_penalty", - "seed", - "stop", - "temperature", - "tool_choice", - "tools", - "top_k", - "top_p" - ], - "default_parameters": { - "temperature": 0.8, - "top_p": 0.8, - "frequency_penalty": null - } + "hugging_face_id": "baidu/ERNIE-4.5-21B-A3B-PT" }, { "id": "baidu/ernie-4.5-vl-28b-a3b", - "canonical_slug": "baidu/ernie-4.5-vl-28b-a3b", - "hugging_face_id": "baidu/ERNIE-4.5-VL-28B-A3B-PT", "name": "Baidu: ERNIE 4.5 VL 28B A3B", - "created": 1755032836, - "description": "A powerful multimodal Mixture-of-Experts chat model featuring 28B total parameters with 3B activated per token, delivering exceptional text and vision understanding through its innovative heterogeneous MoE structure with modality-isolated 
routing. Built with scaling-efficient infrastructure for high-throughput training and inference, the model leverages advanced post-training techniques including SFT, DPO, and UPO for optimized performance, while supporting an impressive 131K context length and RLVR alignment for superior cross-modal reasoning and generation capabilities.", + "description": "A powerful multimodal Mixture-of-Experts chat model featuring 28B total parameters with 3B activated per token, delivering exceptional text and vision understanding through its innovative heterogeneous MoE structure with modality-isolated routing.", "context_length": 30000, - "architecture": { - "modality": "text+image-\u003Etext", - "input_modalities": ["text", "image"], - "output_modalities": ["text"], - "tokenizer": "Other", - "instruct_type": null - }, - "pricing": { - "prompt": "0.00000014", - "completion": "0.00000056", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 30000, - "max_completion_tokens": 8000, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "include_reasoning", - "max_tokens", - "presence_penalty", - "reasoning", - "repetition_penalty", - "seed", - "stop", - "temperature", - "tool_choice", - "tools", - "top_k", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "baidu/ERNIE-4.5-VL-28B-A3B-PT" }, { "id": "z-ai/glm-4.5v", - "canonical_slug": "z-ai/glm-4.5v", - "hugging_face_id": "zai-org/GLM-4.5V", "name": "Z.AI: GLM 4.5V", - "created": 1754922288, - "description": "GLM-4.5V is a vision-language foundation model for multimodal agent applications. Built on a Mixture-of-Experts (MoE) architecture with 106B parameters and 12B activated parameters, it achieves state-of-the-art results in video understanding, image Q&A, OCR, and document parsing, with strong gains in front-end web coding, grounding, and spatial reasoning. It offers a hybrid inference mode: a \"thinking mode\" for deep reasoning and a \"non-thinking mode\" for fast responses. Reasoning behavior can be toggled via the `reasoning` `enabled` boolean. 
[Learn more in our docs](https://openrouter.ai/docs/use-cases/reasoning-tokens#enable-reasoning-with-default-config)", + "description": "GLM-4.5V is a vision-language foundation model for multimodal agent applications.", "context_length": 65536, - "architecture": { - "modality": "text+image-\u003Etext", - "input_modalities": ["text", "image"], - "output_modalities": ["text"], - "tokenizer": "Other", - "instruct_type": null - }, - "pricing": { - "prompt": "0.0000006", - "completion": "0.0000018", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0", - "input_cache_read": "0.00000011" - }, - "top_provider": { - "context_length": 65536, - "max_completion_tokens": 16384, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "include_reasoning", - "logit_bias", - "max_tokens", - "min_p", - "presence_penalty", - "reasoning", - "repetition_penalty", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_k", - "top_p" - ], - "default_parameters": { - "temperature": 0.75, - "top_p": null, - "frequency_penalty": null - } + "hugging_face_id": "zai-org/GLM-4.5V" }, { "id": "ai21/jamba-mini-1.7", - "canonical_slug": "ai21/jamba-mini-1.7", - "hugging_face_id": "ai21labs/AI21-Jamba-Mini-1.7", "name": "AI21: Jamba Mini 1.7", - "created": 1754670601, - "description": "Jamba Mini 1.7 is a compact and efficient member of the Jamba open model family, incorporating key improvements in grounding and instruction-following while maintaining the benefits of the SSM-Transformer hybrid architecture and 256K context window. Despite its compact size, it delivers accurate, contextually grounded responses and improved steerability.", + "description": "Jamba Mini 1.7 is a compact and efficient member of the Jamba open model family, incorporating key improvements in grounding and instruction-following while maintaining the benefits of the SSM-Transformer hybrid architecture and 256K context window.", "context_length": 256000, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Other", - "instruct_type": null - }, - "pricing": { - "prompt": "0.0000002", - "completion": "0.0000004", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 256000, - "max_completion_tokens": 4096, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "max_tokens", - "response_format", - "stop", - "temperature", - "tool_choice", - "tools", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "ai21labs/AI21-Jamba-Mini-1.7" }, { "id": "ai21/jamba-large-1.7", - "canonical_slug": "ai21/jamba-large-1.7", - "hugging_face_id": "ai21labs/AI21-Jamba-Large-1.7", "name": "AI21: Jamba Large 1.7", - "created": 1754669020, - "description": "Jamba Large 1.7 is the latest model in the Jamba open family, offering improvements in grounding, instruction-following, and overall efficiency. 
Built on a hybrid SSM-Transformer architecture with a 256K context window, it delivers more accurate, contextually grounded responses and better steerability than previous versions.", + "description": "Jamba Large 1.7 is the latest model in the Jamba open family, offering improvements in grounding, instruction-following, and overall efficiency.", "context_length": 256000, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Other", - "instruct_type": null - }, - "pricing": { - "prompt": "0.000002", - "completion": "0.000008", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 256000, - "max_completion_tokens": 4096, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "max_tokens", - "response_format", - "stop", - "temperature", - "tool_choice", - "tools", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "ai21labs/AI21-Jamba-Large-1.7" }, { "id": "openai/gpt-5-chat", - "canonical_slug": "openai/gpt-5-chat-2025-08-07", - "hugging_face_id": "", "name": "OpenAI: GPT-5 Chat", - "created": 1754587837, "description": "GPT-5 Chat is designed for advanced, natural, multimodal, and context-aware conversations for enterprise applications.", "context_length": 128000, - "architecture": { - "modality": "text+image-\u003Etext", - "input_modalities": ["file", "image", "text"], - "output_modalities": ["text"], - "tokenizer": "GPT", - "instruct_type": null - }, - "pricing": { - "prompt": "0.00000125", - "completion": "0.00001", - "request": "0", - "image": "0", - "web_search": "0.01", - "internal_reasoning": "0", - "input_cache_read": "0.000000125" - }, - "top_provider": { - "context_length": 128000, - "max_completion_tokens": 16384, - "is_moderated": true - }, - "per_request_limits": null, - "supported_parameters": [ - "max_tokens", - "response_format", - "seed", - "structured_outputs" - ], - "default_parameters": {} + "hugging_face_id": "" }, { "id": "openai/gpt-5", - "canonical_slug": "openai/gpt-5-2025-08-07", - "hugging_face_id": "", "name": "OpenAI: GPT-5", - "created": 1754587413, - "description": "GPT-5 is OpenAI’s most advanced model, offering major improvements in reasoning, code quality, and user experience. It is optimized for complex tasks that require step-by-step reasoning, instruction following, and accuracy in high-stakes use cases. 
It supports test-time routing features and advanced prompt understanding, including user-specified intent like \"think hard about this.\" Improvements include reductions in hallucination, sycophancy, and better performance in coding, writing, and health-related tasks.", + "description": "GPT-5 is OpenAI’s most advanced model, offering major improvements in reasoning, code quality, and user experience.", "context_length": 400000, - "architecture": { - "modality": "text+image-\u003Etext", - "input_modalities": ["text", "image", "file"], - "output_modalities": ["text"], - "tokenizer": "GPT", - "instruct_type": null - }, - "pricing": { - "prompt": "0.00000125", - "completion": "0.00001", - "request": "0", - "image": "0", - "web_search": "0.01", - "internal_reasoning": "0", - "input_cache_read": "0.000000125" - }, - "top_provider": { - "context_length": 400000, - "max_completion_tokens": 128000, - "is_moderated": true - }, - "per_request_limits": null, - "supported_parameters": [ - "include_reasoning", - "max_tokens", - "reasoning", - "response_format", - "seed", - "structured_outputs", - "tool_choice", - "tools" - ], - "default_parameters": { - "temperature": null, - "top_p": null, - "frequency_penalty": null - } + "hugging_face_id": "" }, { "id": "openai/gpt-5-mini", - "canonical_slug": "openai/gpt-5-mini-2025-08-07", - "hugging_face_id": "", "name": "OpenAI: GPT-5 Mini", - "created": 1754587407, - "description": "GPT-5 Mini is a compact version of GPT-5, designed to handle lighter-weight reasoning tasks. It provides the same instruction-following and safety-tuning benefits as GPT-5, but with reduced latency and cost. GPT-5 Mini is the successor to OpenAI's o4-mini model.", + "description": "GPT-5 Mini is a compact version of GPT-5, designed to handle lighter-weight reasoning tasks.", "context_length": 400000, - "architecture": { - "modality": "text+image-\u003Etext", - "input_modalities": ["text", "image", "file"], - "output_modalities": ["text"], - "tokenizer": "GPT", - "instruct_type": null - }, - "pricing": { - "prompt": "0.00000025", - "completion": "0.000002", - "request": "0", - "image": "0", - "web_search": "0.01", - "internal_reasoning": "0", - "input_cache_read": "0.000000025" - }, - "top_provider": { - "context_length": 400000, - "max_completion_tokens": 128000, - "is_moderated": true - }, - "per_request_limits": null, - "supported_parameters": [ - "include_reasoning", - "max_tokens", - "reasoning", - "response_format", - "seed", - "structured_outputs", - "tool_choice", - "tools" - ], - "default_parameters": {} + "hugging_face_id": "" }, { "id": "openai/gpt-5-nano", - "canonical_slug": "openai/gpt-5-nano-2025-08-07", - "hugging_face_id": "", "name": "OpenAI: GPT-5 Nano", - "created": 1754587402, - "description": "GPT-5-Nano is the smallest and fastest variant in the GPT-5 system, optimized for developer tools, rapid interactions, and ultra-low latency environments. While limited in reasoning depth compared to its larger counterparts, it retains key instruction-following and safety features. 
It is the successor to GPT-4.1-nano and offers a lightweight option for cost-sensitive or real-time applications.", + "description": "GPT-5-Nano is the smallest and fastest variant in the GPT-5 system, optimized for developer tools, rapid interactions, and ultra-low latency environments.", "context_length": 400000, - "architecture": { - "modality": "text+image-\u003Etext", - "input_modalities": ["text", "image", "file"], - "output_modalities": ["text"], - "tokenizer": "GPT", - "instruct_type": null - }, - "pricing": { - "prompt": "0.00000005", - "completion": "0.0000004", - "request": "0", - "image": "0", - "web_search": "0.01", - "internal_reasoning": "0", - "input_cache_read": "0.000000005" - }, - "top_provider": { - "context_length": 400000, - "max_completion_tokens": 128000, - "is_moderated": true - }, - "per_request_limits": null, - "supported_parameters": [ - "include_reasoning", - "max_tokens", - "reasoning", - "response_format", - "seed", - "structured_outputs", - "tool_choice", - "tools" - ], - "default_parameters": {} + "hugging_face_id": "" }, { "id": "openai/gpt-oss-120b", - "canonical_slug": "openai/gpt-oss-120b", - "hugging_face_id": "openai/gpt-oss-120b", "name": "OpenAI: gpt-oss-120b", - "created": 1754414231, - "description": "gpt-oss-120b is an open-weight, 117B-parameter Mixture-of-Experts (MoE) language model from OpenAI designed for high-reasoning, agentic, and general-purpose production use cases. It activates 5.1B parameters per forward pass and is optimized to run on a single H100 GPU with native MXFP4 quantization. The model supports configurable reasoning depth, full chain-of-thought access, and native tool use, including function calling, browsing, and structured output generation.", + "description": "gpt-oss-120b is an open-weight, 117B-parameter Mixture-of-Experts (MoE) language model from OpenAI designed for high-reasoning, agentic, and general-purpose production use cases.", "context_length": 131072, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "GPT", - "instruct_type": null - }, - "pricing": { - "prompt": "0.00000004", - "completion": "0.0000004", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 131072, - "max_completion_tokens": 131072, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "include_reasoning", - "logit_bias", - "logprobs", - "max_tokens", - "min_p", - "presence_penalty", - "reasoning", - "repetition_penalty", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_k", - "top_logprobs", - "top_p" - ], - "default_parameters": { - "temperature": null, - "top_p": null, - "frequency_penalty": null - } + "hugging_face_id": "openai/gpt-oss-120b" }, { "id": "openai/gpt-oss-120b:exacto", - "canonical_slug": "openai/gpt-oss-120b", - "hugging_face_id": "openai/gpt-oss-120b", "name": "OpenAI: gpt-oss-120b (exacto)", - "created": 1754414231, - "description": "gpt-oss-120b is an open-weight, 117B-parameter Mixture-of-Experts (MoE) language model from OpenAI designed for high-reasoning, agentic, and general-purpose production use cases. It activates 5.1B parameters per forward pass and is optimized to run on a single H100 GPU with native MXFP4 quantization. 
The model supports configurable reasoning depth, full chain-of-thought access, and native tool use, including function calling, browsing, and structured output generation.", + "description": "gpt-oss-120b is an open-weight, 117B-parameter Mixture-of-Experts (MoE) language model from OpenAI designed for high-reasoning, agentic, and general-purpose production use cases.", "context_length": 131072, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "GPT", - "instruct_type": null - }, - "pricing": { - "prompt": "0.00000005", - "completion": "0.00000024", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 131072, - "max_completion_tokens": null, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "include_reasoning", - "max_tokens", - "min_p", - "presence_penalty", - "reasoning", - "repetition_penalty", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_k", - "top_p" - ], - "default_parameters": { - "temperature": null, - "top_p": null, - "frequency_penalty": null - } + "hugging_face_id": "openai/gpt-oss-120b" }, { "id": "openai/gpt-oss-20b:free", - "canonical_slug": "openai/gpt-oss-20b", - "hugging_face_id": "openai/gpt-oss-20b", "name": "OpenAI: gpt-oss-20b (free)", - "created": 1754414229, - "description": "gpt-oss-20b is an open-weight 21B parameter model released by OpenAI under the Apache 2.0 license. It uses a Mixture-of-Experts (MoE) architecture with 3.6B active parameters per forward pass, optimized for lower-latency inference and deployability on consumer or single-GPU hardware. The model is trained in OpenAI’s Harmony response format and supports reasoning level configuration, fine-tuning, and agentic capabilities including function calling, tool use, and structured outputs.", + "description": "gpt-oss-20b is an open-weight 21B parameter model released by OpenAI under the Apache 2.0 license.", "context_length": 131072, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "GPT", - "instruct_type": null - }, - "pricing": { - "prompt": "0", - "completion": "0", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 131072, - "max_completion_tokens": 131072, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "include_reasoning", - "max_tokens", - "presence_penalty", - "reasoning", - "repetition_penalty", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_k", - "top_p" - ], - "default_parameters": { - "temperature": null, - "top_p": null, - "frequency_penalty": null - } + "hugging_face_id": "openai/gpt-oss-20b" }, { "id": "openai/gpt-oss-20b", - "canonical_slug": "openai/gpt-oss-20b", - "hugging_face_id": "openai/gpt-oss-20b", "name": "OpenAI: gpt-oss-20b", - "created": 1754414229, - "description": "gpt-oss-20b is an open-weight 21B parameter model released by OpenAI under the Apache 2.0 license. It uses a Mixture-of-Experts (MoE) architecture with 3.6B active parameters per forward pass, optimized for lower-latency inference and deployability on consumer or single-GPU hardware. 
The model is trained in OpenAI’s Harmony response format and supports reasoning level configuration, fine-tuning, and agentic capabilities including function calling, tool use, and structured outputs.", + "description": "gpt-oss-20b is an open-weight 21B parameter model released by OpenAI under the Apache 2.0 license.", "context_length": 131072, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "GPT", - "instruct_type": null - }, - "pricing": { - "prompt": "0.00000003", - "completion": "0.00000014", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 131072, - "max_completion_tokens": null, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "include_reasoning", - "logit_bias", - "max_tokens", - "min_p", - "presence_penalty", - "reasoning", - "repetition_penalty", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_k", - "top_p" - ], - "default_parameters": { - "temperature": null, - "top_p": null, - "frequency_penalty": null - } + "hugging_face_id": "openai/gpt-oss-20b" }, { "id": "anthropic/claude-opus-4.1", - "canonical_slug": "anthropic/claude-4.1-opus-20250805", - "hugging_face_id": "", "name": "Anthropic: Claude Opus 4.1", - "created": 1754411591, - "description": "Claude Opus 4.1 is an updated version of Anthropic’s flagship model, offering improved performance in coding, reasoning, and agentic tasks. It achieves 74.5% on SWE-bench Verified and shows notable gains in multi-file code refactoring, debugging precision, and detail-oriented reasoning. The model supports extended thinking up to 64K tokens and is optimized for tasks involving research, data analysis, and tool-assisted reasoning.", + "description": "Claude Opus 4.1 is an updated version of Anthropic’s flagship model, offering improved performance in coding, reasoning, and agentic tasks.", "context_length": 200000, - "architecture": { - "modality": "text+image-\u003Etext", - "input_modalities": ["image", "text", "file"], - "output_modalities": ["text"], - "tokenizer": "Claude", - "instruct_type": null - }, - "pricing": { - "prompt": "0.000015", - "completion": "0.000075", - "request": "0", - "image": "0.024", - "web_search": "0", - "internal_reasoning": "0", - "input_cache_read": "0.0000015", - "input_cache_write": "0.00001875" - }, - "top_provider": { - "context_length": 200000, - "max_completion_tokens": 32000, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "include_reasoning", - "max_tokens", - "reasoning", - "response_format", - "stop", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_k", - "top_p" - ], - "default_parameters": { - "temperature": null, - "top_p": null, - "frequency_penalty": null - } + "hugging_face_id": "" }, { "id": "mistralai/codestral-2508", - "canonical_slug": "mistralai/codestral-2508", - "hugging_face_id": "", "name": "Mistral: Codestral 2508", - "created": 1754079630, - "description": "Mistral's cutting-edge language model for coding released end of July 2025. 
Codestral specializes in low-latency, high-frequency tasks such as fill-in-the-middle (FIM), code correction and test generation.\n\n[Blog Post](https://mistral.ai/news/codestral-25-08)", + "description": "Mistral's cutting-edge language model for coding released end of July 2025.", "context_length": 256000, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Mistral", - "instruct_type": null - }, - "pricing": { - "prompt": "0.0000003", - "completion": "0.0000009", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 256000, - "max_completion_tokens": null, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "max_tokens", - "presence_penalty", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_p" - ], - "default_parameters": { - "temperature": 0.3 - } + "hugging_face_id": "" }, { "id": "qwen/qwen3-coder-30b-a3b-instruct", - "canonical_slug": "qwen/qwen3-coder-30b-a3b-instruct", - "hugging_face_id": "Qwen/Qwen3-Coder-30B-A3B-Instruct", "name": "Qwen: Qwen3 Coder 30B A3B Instruct", - "created": 1753972379, - "description": "Qwen3-Coder-30B-A3B-Instruct is a 30.5B parameter Mixture-of-Experts (MoE) model with 128 experts (8 active per forward pass), designed for advanced code generation, repository-scale understanding, and agentic tool use. Built on the Qwen3 architecture, it supports a native context length of 256K tokens (extendable to 1M with Yarn) and performs strongly in tasks involving function calls, browser use, and structured code completion.\n\nThis model is optimized for instruction-following without “thinking mode”, and integrates well with OpenAI-compatible tool-use formats. ", + "description": "Qwen3-Coder-30B-A3B-Instruct is a 30.5B parameter Mixture-of-Experts (MoE) model with 128 experts (8 active per forward pass), designed for advanced code generation, repository-scale understanding, and agentic tool use.", "context_length": 262144, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Qwen3", - "instruct_type": null - }, - "pricing": { - "prompt": "0.00000006", - "completion": "0.00000025", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 262144, - "max_completion_tokens": 262144, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "max_tokens", - "min_p", - "presence_penalty", - "repetition_penalty", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_k", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "Qwen/Qwen3-Coder-30B-A3B-Instruct" }, { "id": "qwen/qwen3-30b-a3b-instruct-2507", - "canonical_slug": "qwen/qwen3-30b-a3b-instruct-2507", - "hugging_face_id": "Qwen/Qwen3-30B-A3B-Instruct-2507", "name": "Qwen: Qwen3 30B A3B Instruct 2507", - "created": 1753806965, - "description": "Qwen3-30B-A3B-Instruct-2507 is a 30.5B-parameter mixture-of-experts language model from Qwen, with 3.3B active parameters per inference. It operates in non-thinking mode and is designed for high-quality instruction following, multilingual understanding, and agentic tool use. 
Post-trained on instruction data, it demonstrates competitive performance across reasoning (AIME, ZebraLogic), coding (MultiPL-E, LiveCodeBench), and alignment (IFEval, WritingBench) benchmarks. It outperforms its non-instruct variant on subjective and open-ended tasks while retaining strong factual and coding performance.", + "description": "Qwen3-30B-A3B-Instruct-2507 is a 30.5B-parameter mixture-of-experts language model from Qwen, with 3.3B active parameters per inference.", "context_length": 262144, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Qwen3", - "instruct_type": null - }, - "pricing": { - "prompt": "0.00000008", - "completion": "0.00000033", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 262144, - "max_completion_tokens": 262144, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "max_tokens", - "presence_penalty", - "repetition_penalty", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_k", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "Qwen/Qwen3-30B-A3B-Instruct-2507" }, { "id": "z-ai/glm-4.5", - "canonical_slug": "z-ai/glm-4.5", - "hugging_face_id": "zai-org/GLM-4.5", "name": "Z.AI: GLM 4.5", - "created": 1753471347, - "description": "GLM-4.5 is our latest flagship foundation model, purpose-built for agent-based applications. It leverages a Mixture-of-Experts (MoE) architecture and supports a context length of up to 128k tokens. GLM-4.5 delivers significantly enhanced capabilities in reasoning, code generation, and agent alignment. It supports a hybrid inference mode with two options, a \"thinking mode\" designed for complex reasoning and tool use, and a \"non-thinking mode\" optimized for instant responses. Users can control the reasoning behaviour with the `reasoning` `enabled` boolean. 
[Learn more in our docs](https://openrouter.ai/docs/use-cases/reasoning-tokens#enable-reasoning-with-default-config)", + "description": "GLM-4.5 is our latest flagship foundation model, purpose-built for agent-based applications.", "context_length": 131072, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Other", - "instruct_type": null - }, - "pricing": { - "prompt": "0.00000035", - "completion": "0.0000015", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 131072, - "max_completion_tokens": 131072, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "include_reasoning", - "logit_bias", - "logprobs", - "max_tokens", - "min_p", - "presence_penalty", - "reasoning", - "repetition_penalty", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_a", - "top_k", - "top_logprobs", - "top_p" - ], - "default_parameters": { - "temperature": 0.75, - "top_p": null, - "frequency_penalty": null - } + "hugging_face_id": "zai-org/GLM-4.5" }, { "id": "z-ai/glm-4.5-air:free", - "canonical_slug": "z-ai/glm-4.5-air", - "hugging_face_id": "zai-org/GLM-4.5-Air", "name": "Z.AI: GLM 4.5 Air (free)", - "created": 1753471258, - "description": "GLM-4.5-Air is the lightweight variant of our latest flagship model family, also purpose-built for agent-centric applications. Like GLM-4.5, it adopts the Mixture-of-Experts (MoE) architecture but with a more compact parameter size. GLM-4.5-Air also supports hybrid inference modes, offering a \"thinking mode\" for advanced reasoning and tool use, and a \"non-thinking mode\" for real-time interaction. Users can control the reasoning behaviour with the `reasoning` `enabled` boolean. [Learn more in our docs](https://openrouter.ai/docs/use-cases/reasoning-tokens#enable-reasoning-with-default-config)", + "description": "GLM-4.5-Air is the lightweight variant of our latest flagship model family, also purpose-built for agent-centric applications.", "context_length": 131072, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Other", - "instruct_type": null - }, - "pricing": { - "prompt": "0", - "completion": "0", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 131072, - "max_completion_tokens": 131072, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "include_reasoning", - "max_tokens", - "presence_penalty", - "reasoning", - "repetition_penalty", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_k", - "top_p" - ], - "default_parameters": { - "temperature": 0.75, - "top_p": null, - "frequency_penalty": null - } + "hugging_face_id": "zai-org/GLM-4.5-Air" }, { "id": "z-ai/glm-4.5-air", - "canonical_slug": "z-ai/glm-4.5-air", - "hugging_face_id": "zai-org/GLM-4.5-Air", "name": "Z.AI: GLM 4.5 Air", - "created": 1753471258, - "description": "GLM-4.5-Air is the lightweight variant of our latest flagship model family, also purpose-built for agent-centric applications. Like GLM-4.5, it adopts the Mixture-of-Experts (MoE) architecture but with a more compact parameter size. 
GLM-4.5-Air also supports hybrid inference modes, offering a \"thinking mode\" for advanced reasoning and tool use, and a \"non-thinking mode\" for real-time interaction. Users can control the reasoning behaviour with the `reasoning` `enabled` boolean. [Learn more in our docs](https://openrouter.ai/docs/use-cases/reasoning-tokens#enable-reasoning-with-default-config)", + "description": "GLM-4.5-Air is the lightweight variant of our latest flagship model family, also purpose-built for agent-centric applications.", "context_length": 131072, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Other", - "instruct_type": null - }, - "pricing": { - "prompt": "0.00000013", - "completion": "0.00000085", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0", - "input_cache_read": "0" - }, - "top_provider": { - "context_length": 131072, - "max_completion_tokens": 98304, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "include_reasoning", - "max_tokens", - "presence_penalty", - "reasoning", - "repetition_penalty", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_k", - "top_p" - ], - "default_parameters": { - "temperature": 0.75, - "top_p": null, - "frequency_penalty": null - } + "hugging_face_id": "zai-org/GLM-4.5-Air" }, { "id": "qwen/qwen3-235b-a22b-thinking-2507", - "canonical_slug": "qwen/qwen3-235b-a22b-thinking-2507", - "hugging_face_id": "Qwen/Qwen3-235B-A22B-Thinking-2507", "name": "Qwen: Qwen3 235B A22B Thinking 2507", - "created": 1753449557, - "description": "Qwen3-235B-A22B-Thinking-2507 is a high-performance, open-weight Mixture-of-Experts (MoE) language model optimized for complex reasoning tasks. It activates 22B of its 235B parameters per forward pass and natively supports up to 262,144 tokens of context. This \"thinking-only\" variant enhances structured logical reasoning, mathematics, science, and long-form generation, showing strong benchmark performance across AIME, SuperGPQA, LiveCodeBench, and MMLU-Redux. It enforces a special reasoning mode (\u003C/think\u003E) and is designed for high-token outputs (up to 81,920 tokens) in challenging domains.\n\nThe model is instruction-tuned and excels at step-by-step reasoning, tool use, agentic workflows, and multilingual tasks. 
This release represents the most capable open-source variant in the Qwen3-235B series, surpassing many closed models in structured reasoning use cases.", + "description": "Qwen3-235B-A22B-Thinking-2507 is a high-performance, open-weight Mixture-of-Experts (MoE) language model optimized for complex reasoning tasks.", "context_length": 262144, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Qwen3", - "instruct_type": "qwen3" - }, - "pricing": { - "prompt": "0.00000011", - "completion": "0.0000006", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 262144, - "max_completion_tokens": 262144, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "include_reasoning", - "logit_bias", - "max_tokens", - "min_p", - "presence_penalty", - "reasoning", - "repetition_penalty", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_k", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "Qwen/Qwen3-235B-A22B-Thinking-2507" }, { "id": "z-ai/glm-4-32b", - "canonical_slug": "z-ai/glm-4-32b-0414", - "hugging_face_id": "", "name": "Z.AI: GLM 4 32B ", - "created": 1753376617, - "description": "GLM 4 32B is a cost-effective foundation language model.\n\nIt can efficiently perform complex tasks and has significantly enhanced capabilities in tool use, online search, and code-related intelligent tasks.\n\nIt is made by the same lab behind the thudm models.", + "description": "GLM 4 32B is a cost-effective foundation language model.", "context_length": 128000, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Other", - "instruct_type": null - }, - "pricing": { - "prompt": "0.0000001", - "completion": "0.0000001", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 128000, - "max_completion_tokens": null, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "max_tokens", - "temperature", - "tool_choice", - "tools", - "top_p" - ], - "default_parameters": { - "temperature": 0.75, - "top_p": null, - "frequency_penalty": null - } + "hugging_face_id": "" }, { "id": "qwen/qwen3-coder:free", - "canonical_slug": "qwen/qwen3-coder-480b-a35b-07-25", - "hugging_face_id": "Qwen/Qwen3-Coder-480B-A35B-Instruct", "name": "Qwen: Qwen3 Coder 480B A35B (free)", - "created": 1753230546, - "description": "Qwen3-Coder-480B-A35B-Instruct is a Mixture-of-Experts (MoE) code generation model developed by the Qwen team. It is optimized for agentic coding tasks such as function calling, tool use, and long-context reasoning over repositories. The model features 480 billion total parameters, with 35 billion active per forward pass (8 out of 160 experts).\n\nPricing for the Alibaba endpoints varies by context length. 
Once a request is greater than 128k input tokens, the higher pricing is used.", + "description": "Qwen3-Coder-480B-A35B-Instruct is a Mixture-of-Experts (MoE) code generation model developed by the Qwen team.", "context_length": 262000, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Qwen3", - "instruct_type": null - }, - "pricing": { - "prompt": "0", - "completion": "0", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 262000, - "max_completion_tokens": 262000, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "max_tokens", - "presence_penalty", - "repetition_penalty", - "seed", - "stop", - "temperature", - "tool_choice", - "tools", - "top_k", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "Qwen/Qwen3-Coder-480B-A35B-Instruct" }, { "id": "qwen/qwen3-coder", - "canonical_slug": "qwen/qwen3-coder-480b-a35b-07-25", - "hugging_face_id": "Qwen/Qwen3-Coder-480B-A35B-Instruct", "name": "Qwen: Qwen3 Coder 480B A35B", - "created": 1753230546, - "description": "Qwen3-Coder-480B-A35B-Instruct is a Mixture-of-Experts (MoE) code generation model developed by the Qwen team. It is optimized for agentic coding tasks such as function calling, tool use, and long-context reasoning over repositories. The model features 480 billion total parameters, with 35 billion active per forward pass (8 out of 160 experts).\n\nPricing for the Alibaba endpoints varies by context length. Once a request is greater than 128k input tokens, the higher pricing is used.", + "description": "Qwen3-Coder-480B-A35B-Instruct is a Mixture-of-Experts (MoE) code generation model developed by the Qwen team.", "context_length": 262144, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Qwen3", - "instruct_type": null - }, - "pricing": { - "prompt": "0.00000022", - "completion": "0.00000095", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 262144, - "max_completion_tokens": 262144, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "logit_bias", - "logprobs", - "max_tokens", - "min_p", - "presence_penalty", - "reasoning", - "repetition_penalty", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_k", - "top_logprobs", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "Qwen/Qwen3-Coder-480B-A35B-Instruct" }, { "id": "qwen/qwen3-coder:exacto", - "canonical_slug": "qwen/qwen3-coder-480b-a35b-07-25", - "hugging_face_id": "Qwen/Qwen3-Coder-480B-A35B-Instruct", "name": "Qwen: Qwen3 Coder 480B A35B (exacto)", - "created": 1753230546, - "description": "Qwen3-Coder-480B-A35B-Instruct is a Mixture-of-Experts (MoE) code generation model developed by the Qwen team. It is optimized for agentic coding tasks such as function calling, tool use, and long-context reasoning over repositories. The model features 480 billion total parameters, with 35 billion active per forward pass (8 out of 160 experts).\n\nPricing for the Alibaba endpoints varies by context length. 
Once a request is greater than 128k input tokens, the higher pricing is used.", + "description": "Qwen3-Coder-480B-A35B-Instruct is a Mixture-of-Experts (MoE) code generation model developed by the Qwen team.", "context_length": 262144, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Qwen3", - "instruct_type": null - }, - "pricing": { - "prompt": "0.00000038", - "completion": "0.00000153", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 262144, - "max_completion_tokens": 262144, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "max_tokens", - "presence_penalty", - "reasoning", - "repetition_penalty", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_k", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "Qwen/Qwen3-Coder-480B-A35B-Instruct" }, { "id": "bytedance/ui-tars-1.5-7b", - "canonical_slug": "bytedance/ui-tars-1.5-7b", - "hugging_face_id": "ByteDance-Seed/UI-TARS-1.5-7B", "name": "ByteDance: UI-TARS 7B ", - "created": 1753205056, - "description": "UI-TARS-1.5 is a multimodal vision-language agent optimized for GUI-based environments, including desktop interfaces, web browsers, mobile systems, and games. Built by ByteDance, it builds upon the UI-TARS framework with reinforcement learning-based reasoning, enabling robust action planning and execution across virtual interfaces.\n\nThis model achieves state-of-the-art results on a range of interactive and grounding benchmarks, including OSworld, WebVoyager, AndroidWorld, and ScreenSpot. It also demonstrates perfect task completion across diverse Poki games and outperforms prior models in Minecraft agent tasks. UI-TARS-1.5 supports thought decomposition during inference and shows strong scaling across variants, with the 1.5 version notably exceeding the performance of earlier 72B and 7B checkpoints.", + "description": "UI-TARS-1.5 is a multimodal vision-language agent optimized for GUI-based environments, including desktop interfaces, web browsers, mobile systems, and games.", "context_length": 128000, - "architecture": { - "modality": "text+image-\u003Etext", - "input_modalities": ["image", "text"], - "output_modalities": ["text"], - "tokenizer": "Other", - "instruct_type": null - }, - "pricing": { - "prompt": "0.0000001", - "completion": "0.0000002", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 128000, - "max_completion_tokens": 2048, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "logit_bias", - "max_tokens", - "min_p", - "presence_penalty", - "repetition_penalty", - "seed", - "stop", - "temperature", - "top_k", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "ByteDance-Seed/UI-TARS-1.5-7B" }, { "id": "google/gemini-2.5-flash-lite", - "canonical_slug": "google/gemini-2.5-flash-lite", - "hugging_face_id": "", "name": "Google: Gemini 2.5 Flash Lite", - "created": 1753200276, - "description": "Gemini 2.5 Flash-Lite is a lightweight reasoning model in the Gemini 2.5 family, optimized for ultra-low latency and cost efficiency. It offers improved throughput, faster token generation, and better performance across common benchmarks compared to earlier Flash models. 
By default, \"thinking\" (i.e. multi-pass reasoning) is disabled to prioritize speed, but developers can enable it via the [Reasoning API parameter](https://openrouter.ai/docs/use-cases/reasoning-tokens) to selectively trade off cost for intelligence. ", + "description": "Gemini 2.5 Flash-Lite is a lightweight reasoning model in the Gemini 2.5 family, optimized for ultra-low latency and cost efficiency.", "context_length": 1048576, - "architecture": { - "modality": "text+image-\u003Etext", - "input_modalities": ["text", "image", "file", "audio", "video"], - "output_modalities": ["text"], - "tokenizer": "Gemini", - "instruct_type": null - }, - "pricing": { - "prompt": "0.0000001", - "completion": "0.0000004", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0", - "input_cache_read": "0.00000001", - "input_cache_write": "0.0000001833" - }, - "top_provider": { - "context_length": 1048576, - "max_completion_tokens": 65535, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "include_reasoning", - "max_tokens", - "reasoning", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_p" - ], - "default_parameters": { - "temperature": null, - "top_p": null, - "frequency_penalty": null - } + "hugging_face_id": "" }, { "id": "qwen/qwen3-235b-a22b-2507", - "canonical_slug": "qwen/qwen3-235b-a22b-07-25", - "hugging_face_id": "Qwen/Qwen3-235B-A22B-Instruct-2507", "name": "Qwen: Qwen3 235B A22B Instruct 2507", - "created": 1753119555, - "description": "Qwen3-235B-A22B-Instruct-2507 is a multilingual, instruction-tuned mixture-of-experts language model based on the Qwen3-235B architecture, with 22B active parameters per forward pass. It is optimized for general-purpose text generation, including instruction following, logical reasoning, math, code, and tool usage. The model supports a native 262K context length and does not implement \"thinking mode\" (\u003Cthink\u003E blocks).\n\nCompared to its base variant, this version delivers significant gains in knowledge coverage, long-context reasoning, coding benchmarks, and alignment with open-ended tasks. 
It is particularly strong on multilingual understanding, math reasoning (e.g., AIME, HMMT), and alignment evaluations like Arena-Hard and WritingBench.", + "description": "Qwen3-235B-A22B-Instruct-2507 is a multilingual, instruction-tuned mixture-of-experts language model based on the Qwen3-235B architecture, with 22B active parameters per forward pass.", "context_length": 262144, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Qwen3", - "instruct_type": null - }, - "pricing": { - "prompt": "0.00000008", - "completion": "0.00000055", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 262144, - "max_completion_tokens": 262144, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "logit_bias", - "logprobs", - "max_tokens", - "min_p", - "presence_penalty", - "reasoning", - "repetition_penalty", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_k", - "top_logprobs", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "Qwen/Qwen3-235B-A22B-Instruct-2507" }, { "id": "switchpoint/router", - "canonical_slug": "switchpoint/router", - "hugging_face_id": "", "name": "Switchpoint Router", - "created": 1752272899, - "description": "Switchpoint AI's router instantly analyzes your request and directs it to the optimal AI from an ever-evolving library. \n\nAs the world of LLMs advances, our router gets smarter, ensuring you always benefit from the industry's newest models without changing your workflow.\n\nThis model is configured for a simple, flat rate per response here on OpenRouter. It's powered by the full routing engine from [Switchpoint AI](https://www.switchpoint.dev).", + "description": "Switchpoint AI's router instantly analyzes your request and directs it to the optimal AI from an ever-evolving library.", "context_length": 131072, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Other", - "instruct_type": null - }, - "pricing": { - "prompt": "0.00000085", - "completion": "0.0000034", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 131072, - "max_completion_tokens": null, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "include_reasoning", - "max_tokens", - "reasoning", - "seed", - "stop", - "temperature", - "top_k", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "" }, { "id": "moonshotai/kimi-k2:free", - "canonical_slug": "moonshotai/kimi-k2", - "hugging_face_id": "moonshotai/Kimi-K2-Instruct", "name": "MoonshotAI: Kimi K2 0711 (free)", - "created": 1752263252, - "description": "Kimi K2 Instruct is a large-scale Mixture-of-Experts (MoE) language model developed by Moonshot AI, featuring 1 trillion total parameters with 32 billion active per forward pass. It is optimized for agentic capabilities, including advanced tool use, reasoning, and code synthesis. Kimi K2 excels across a broad range of benchmarks, particularly in coding (LiveCodeBench, SWE-bench), reasoning (ZebraLogic, GPQA), and tool-use (Tau2, AceBench) tasks. 
It supports long-context inference up to 128K tokens and is designed with a novel training stack that includes the MuonClip optimizer for stable large-scale MoE training.", + "description": "Kimi K2 Instruct is a large-scale Mixture-of-Experts (MoE) language model developed by Moonshot AI, featuring 1 trillion total parameters with 32 billion active per forward pass.", "context_length": 32768, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Other", - "instruct_type": null - }, - "pricing": { - "prompt": "0", - "completion": "0", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 32768, - "max_completion_tokens": null, - "is_moderated": true - }, - "per_request_limits": null, - "supported_parameters": ["max_tokens", "seed", "stop", "temperature"], - "default_parameters": {} + "hugging_face_id": "moonshotai/Kimi-K2-Instruct" }, { "id": "moonshotai/kimi-k2", - "canonical_slug": "moonshotai/kimi-k2", - "hugging_face_id": "moonshotai/Kimi-K2-Instruct", "name": "MoonshotAI: Kimi K2 0711", - "created": 1752263252, - "description": "Kimi K2 Instruct is a large-scale Mixture-of-Experts (MoE) language model developed by Moonshot AI, featuring 1 trillion total parameters with 32 billion active per forward pass. It is optimized for agentic capabilities, including advanced tool use, reasoning, and code synthesis. Kimi K2 excels across a broad range of benchmarks, particularly in coding (LiveCodeBench, SWE-bench), reasoning (ZebraLogic, GPQA), and tool-use (Tau2, AceBench) tasks. It supports long-context inference up to 128K tokens and is designed with a novel training stack that includes the MuonClip optimizer for stable large-scale MoE training.", + "description": "Kimi K2 Instruct is a large-scale Mixture-of-Experts (MoE) language model developed by Moonshot AI, featuring 1 trillion total parameters with 32 billion active per forward pass.", "context_length": 131072, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Other", - "instruct_type": null - }, - "pricing": { - "prompt": "0.0000005", - "completion": "0.0000024", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 131072, - "max_completion_tokens": null, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "logit_bias", - "max_tokens", - "min_p", - "presence_penalty", - "repetition_penalty", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_k", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "moonshotai/Kimi-K2-Instruct" }, { "id": "thudm/glm-4.1v-9b-thinking", - "canonical_slug": "thudm/glm-4.1v-9b-thinking", - "hugging_face_id": "THUDM/GLM-4.1V-9B-Thinking", "name": "THUDM: GLM 4.1V 9B Thinking", - "created": 1752244385, - "description": "GLM-4.1V-9B-Thinking is a 9B parameter vision-language model developed by THUDM, based on the GLM-4-9B foundation. It introduces a reasoning-centric \"thinking paradigm\" enhanced with reinforcement learning to improve multimodal reasoning, long-context understanding (up to 64K tokens), and complex problem solving. 
It achieves state-of-the-art performance among models in its class, outperforming even larger models like Qwen-2.5-VL-72B on a majority of benchmark tasks. ", + "description": "GLM-4.1V-9B-Thinking is a 9B parameter vision-language model developed by THUDM, based on the GLM-4-9B foundation.", "context_length": 65536, - "architecture": { - "modality": "text+image-\u003Etext", - "input_modalities": ["image", "text"], - "output_modalities": ["text"], - "tokenizer": "Other", - "instruct_type": null - }, - "pricing": { - "prompt": "0.000000035", - "completion": "0.000000138", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 65536, - "max_completion_tokens": 8000, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "include_reasoning", - "max_tokens", - "presence_penalty", - "reasoning", - "repetition_penalty", - "seed", - "stop", - "temperature", - "top_k", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "THUDM/GLM-4.1V-9B-Thinking" }, { "id": "mistralai/devstral-medium", - "canonical_slug": "mistralai/devstral-medium-2507", - "hugging_face_id": "", "name": "Mistral: Devstral Medium", - "created": 1752161321, - "description": "Devstral Medium is a high-performance code generation and agentic reasoning model developed jointly by Mistral AI and All Hands AI. Positioned as a step up from Devstral Small, it achieves 61.6% on SWE-Bench Verified, placing it ahead of Gemini 2.5 Pro and GPT-4.1 in code-related tasks, at a fraction of the cost. It is designed for generalization across prompt styles and tool use in code agents and frameworks.\n\nDevstral Medium is available via API only (not open-weight), and supports enterprise deployment on private infrastructure, with optional fine-tuning capabilities.", + "description": "Devstral Medium is a high-performance code generation and agentic reasoning model developed jointly by Mistral AI and All Hands AI.", "context_length": 131072, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Mistral", - "instruct_type": null - }, - "pricing": { - "prompt": "0.0000004", - "completion": "0.000002", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 131072, - "max_completion_tokens": null, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "max_tokens", - "presence_penalty", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_p" - ], - "default_parameters": { - "temperature": 0.3 - } + "hugging_face_id": "" }, { "id": "mistralai/devstral-small", - "canonical_slug": "mistralai/devstral-small-2507", - "hugging_face_id": "mistralai/Devstral-Small-2507", "name": "Mistral: Devstral Small 1.1", - "created": 1752160751, - "description": "Devstral Small 1.1 is a 24B parameter open-weight language model for software engineering agents, developed by Mistral AI in collaboration with All Hands AI. 
Finetuned from Mistral Small 3.1 and released under the Apache 2.0 license, it features a 128k token context window and supports both Mistral-style function calling and XML output formats.\n\nDesigned for agentic coding workflows, Devstral Small 1.1 is optimized for tasks such as codebase exploration, multi-file edits, and integration into autonomous development agents like OpenHands and Cline. It achieves 53.6% on SWE-Bench Verified, surpassing all other open models on this benchmark, while remaining lightweight enough to run on a single 4090 GPU or Apple silicon machine. The model uses a Tekken tokenizer with a 131k vocabulary and is deployable via vLLM, Transformers, Ollama, LM Studio, and other OpenAI-compatible runtimes.\n", + "description": "Devstral Small 1.1 is a 24B parameter open-weight language model for software engineering agents, developed by Mistral AI in collaboration with All Hands AI.", "context_length": 128000, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Mistral", - "instruct_type": null - }, - "pricing": { - "prompt": "0.00000007", - "completion": "0.00000028", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 128000, - "max_completion_tokens": null, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "max_tokens", - "min_p", - "presence_penalty", - "repetition_penalty", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_k", - "top_p" - ], - "default_parameters": { - "temperature": 0.3 - } + "hugging_face_id": "mistralai/Devstral-Small-2507" }, { "id": "cognitivecomputations/dolphin-mistral-24b-venice-edition:free", - "canonical_slug": "venice/uncensored", - "hugging_face_id": "cognitivecomputations/Dolphin-Mistral-24B-Venice-Edition", "name": "Venice: Uncensored (free)", - "created": 1752094966, - "description": "Venice Uncensored Dolphin Mistral 24B Venice Edition is a fine-tuned variant of Mistral-Small-24B-Instruct-2501, developed by dphn.ai in collaboration with Venice.ai. This model is designed as an “uncensored” instruct-tuned LLM, preserving user control over alignment, system prompts, and behavior. 
Intended for advanced and unrestricted use cases, Venice Uncensored emphasizes steerability and transparent behavior, removing default safety and alignment layers typically found in mainstream assistant models.", + "description": "Venice Uncensored Dolphin Mistral 24B Venice Edition is a fine-tuned variant of Mistral-Small-24B-Instruct-2501, developed by dphn.ai in collaboration with Venice.ai.", "context_length": 32768, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Other", - "instruct_type": null - }, - "pricing": { - "prompt": "0", - "completion": "0", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 32768, - "max_completion_tokens": null, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "max_tokens", - "presence_penalty", - "response_format", - "stop", - "structured_outputs", - "temperature", - "top_k", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "cognitivecomputations/Dolphin-Mistral-24B-Venice-Edition" }, { "id": "x-ai/grok-4", - "canonical_slug": "x-ai/grok-4-07-09", - "hugging_face_id": "", "name": "xAI: Grok 4", - "created": 1752087689, - "description": "Grok 4 is xAI's latest reasoning model with a 256k context window. It supports parallel tool calling, structured outputs, and both image and text inputs. Note that reasoning is not exposed, reasoning cannot be disabled, and the reasoning effort cannot be specified. Pricing increases once the total tokens in a given request is greater than 128k tokens. See more details on the [xAI docs](https://docs.x.ai/docs/models/grok-4-0709)", + "description": "Grok 4 is xAI's latest reasoning model with a 256k context window.", "context_length": 256000, - "architecture": { - "modality": "text+image-\u003Etext", - "input_modalities": ["image", "text"], - "output_modalities": ["text"], - "tokenizer": "Grok", - "instruct_type": null - }, - "pricing": { - "prompt": "0.000003", - "completion": "0.000015", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0", - "input_cache_read": "0.00000075" - }, - "top_provider": { - "context_length": 256000, - "max_completion_tokens": null, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "include_reasoning", - "logprobs", - "max_tokens", - "reasoning", - "response_format", - "seed", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_logprobs", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "" }, { "id": "google/gemma-3n-e2b-it:free", - "canonical_slug": "google/gemma-3n-e2b-it", - "hugging_face_id": "google/gemma-3n-E2B-it", "name": "Google: Gemma 3n 2B (free)", - "created": 1752074904, - "description": "Gemma 3n E2B IT is a multimodal, instruction-tuned model developed by Google DeepMind, designed to operate efficiently at an effective parameter size of 2B while leveraging a 6B architecture. Based on the MatFormer architecture, it supports nested submodels and modular composition via the Mix-and-Match framework. Gemma 3n models are optimized for low-resource deployment, offering 32K context length and strong multilingual and reasoning performance across common benchmarks. 
This variant is trained on a diverse corpus including code, math, web, and multimodal data.", + "description": "Gemma 3n E2B IT is a multimodal, instruction-tuned model developed by Google DeepMind, designed to operate efficiently at an effective parameter size of 2B while leveraging a 6B architecture.", "context_length": 8192, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Other", - "instruct_type": null - }, - "pricing": { - "prompt": "0", - "completion": "0", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 8192, - "max_completion_tokens": 2048, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "max_tokens", - "presence_penalty", - "response_format", - "seed", - "stop", - "temperature", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "google/gemma-3n-E2B-it" }, { "id": "tencent/hunyuan-a13b-instruct", - "canonical_slug": "tencent/hunyuan-a13b-instruct", - "hugging_face_id": "tencent/Hunyuan-A13B-Instruct", "name": "Tencent: Hunyuan A13B Instruct", - "created": 1751987664, - "description": "Hunyuan-A13B is a 13B active parameter Mixture-of-Experts (MoE) language model developed by Tencent, with a total parameter count of 80B and support for reasoning via Chain-of-Thought. It offers competitive benchmark performance across mathematics, science, coding, and multi-turn reasoning tasks, while maintaining high inference efficiency via Grouped Query Attention (GQA) and quantization support (FP8, GPTQ, etc.).", + "description": "Hunyuan-A13B is a 13B active parameter Mixture-of-Experts (MoE) language model developed by Tencent, with a total parameter count of 80B and support for reasoning via Chain-of-Thought.", "context_length": 131072, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Other", - "instruct_type": null - }, - "pricing": { - "prompt": "0.00000014", - "completion": "0.00000057", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 131072, - "max_completion_tokens": 131072, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "include_reasoning", - "reasoning", - "response_format", - "structured_outputs", - "temperature", - "top_k", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "tencent/Hunyuan-A13B-Instruct" }, { "id": "tngtech/deepseek-r1t2-chimera:free", - "canonical_slug": "tngtech/deepseek-r1t2-chimera", - "hugging_face_id": "tngtech/DeepSeek-TNG-R1T2-Chimera", "name": "TNG: DeepSeek R1T2 Chimera (free)", - "created": 1751986985, - "description": "DeepSeek-TNG-R1T2-Chimera is the second-generation Chimera model from TNG Tech. It is a 671 B-parameter mixture-of-experts text-generation model assembled from DeepSeek-AI’s R1-0528, R1, and V3-0324 checkpoints with an Assembly-of-Experts merge. The tri-parent design yields strong reasoning performance while running roughly 20 % faster than the original R1 and more than 2× faster than R1-0528 under vLLM, giving a favorable cost-to-intelligence trade-off. 
The checkpoint supports contexts up to 60 k tokens in standard use (tested to ~130 k) and maintains consistent \u003Cthink\u003E token behaviour, making it suitable for long-context analysis, dialogue and other open-ended generation tasks.", + "description": "DeepSeek-TNG-R1T2-Chimera is the second-generation Chimera model from TNG Tech.", "context_length": 163840, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "DeepSeek", - "instruct_type": null - }, - "pricing": { - "prompt": "0", - "completion": "0", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 163840, - "max_completion_tokens": null, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "include_reasoning", - "max_tokens", - "presence_penalty", - "reasoning", - "repetition_penalty", - "seed", - "stop", - "temperature", - "top_k", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "tngtech/DeepSeek-TNG-R1T2-Chimera" }, { "id": "tngtech/deepseek-r1t2-chimera", - "canonical_slug": "tngtech/deepseek-r1t2-chimera", - "hugging_face_id": "tngtech/DeepSeek-TNG-R1T2-Chimera", "name": "TNG: DeepSeek R1T2 Chimera", - "created": 1751986985, - "description": "DeepSeek-TNG-R1T2-Chimera is the second-generation Chimera model from TNG Tech. It is a 671 B-parameter mixture-of-experts text-generation model assembled from DeepSeek-AI’s R1-0528, R1, and V3-0324 checkpoints with an Assembly-of-Experts merge. The tri-parent design yields strong reasoning performance while running roughly 20 % faster than the original R1 and more than 2× faster than R1-0528 under vLLM, giving a favorable cost-to-intelligence trade-off. The checkpoint supports contexts up to 60 k tokens in standard use (tested to ~130 k) and maintains consistent \u003Cthink\u003E token behaviour, making it suitable for long-context analysis, dialogue and other open-ended generation tasks.", + "description": "DeepSeek-TNG-R1T2-Chimera is the second-generation Chimera model from TNG Tech.", "context_length": 163840, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "DeepSeek", - "instruct_type": null - }, - "pricing": { - "prompt": "0.0000003", - "completion": "0.0000012", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 163840, - "max_completion_tokens": 163840, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "include_reasoning", - "max_tokens", - "presence_penalty", - "reasoning", - "repetition_penalty", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_k", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "tngtech/DeepSeek-TNG-R1T2-Chimera" }, { "id": "morph/morph-v3-large", - "canonical_slug": "morph/morph-v3-large", - "hugging_face_id": "", "name": "Morph: Morph V3 Large", - "created": 1751910858, - "description": "Morph's high-accuracy apply model for complex code edits. 
~4,500 tokens/sec with 98% accuracy for precise code transformations.\n\nThe model requires the prompt to be in the following format: \n\u003Cinstruction\u003E{instruction}\u003C/instruction\u003E\n\u003Ccode\u003E{initial_code}\u003C/code\u003E\n\u003Cupdate\u003E{edit_snippet}\u003C/update\u003E\n\nZero Data Retention is enabled for Morph. Learn more about this model in their [documentation](https://docs.morphllm.com/quickstart)", + "description": "Morph's high-accuracy apply model for complex code edits.", "context_length": 262144, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Other", - "instruct_type": null - }, - "pricing": { - "prompt": "0.0000009", - "completion": "0.0000019", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 262144, - "max_completion_tokens": 131072, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": ["max_tokens", "stop", "temperature"], - "default_parameters": { - "temperature": null, - "top_p": null, - "frequency_penalty": null - } + "hugging_face_id": "" }, { "id": "morph/morph-v3-fast", - "canonical_slug": "morph/morph-v3-fast", - "hugging_face_id": "", "name": "Morph: Morph V3 Fast", - "created": 1751910002, - "description": "Morph's fastest apply model for code edits. ~10,500 tokens/sec with 96% accuracy for rapid code transformations.\n\nThe model requires the prompt to be in the following format: \n\u003Cinstruction\u003E{instruction}\u003C/instruction\u003E\n\u003Ccode\u003E{initial_code}\u003C/code\u003E\n\u003Cupdate\u003E{edit_snippet}\u003C/update\u003E\n\nZero Data Retention is enabled for Morph. Learn more about this model in their [documentation](https://docs.morphllm.com/quickstart)", + "description": "Morph's fastest apply model for code edits.", "context_length": 81920, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Other", - "instruct_type": null - }, - "pricing": { - "prompt": "0.0000008", - "completion": "0.0000012", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 81920, - "max_completion_tokens": 38000, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": ["max_tokens", "stop", "temperature"], - "default_parameters": { - "temperature": null, - "top_p": null, - "frequency_penalty": null - } + "hugging_face_id": "" }, { "id": "baidu/ernie-4.5-vl-424b-a47b", - "canonical_slug": "baidu/ernie-4.5-vl-424b-a47b", - "hugging_face_id": "baidu/ERNIE-4.5-VL-424B-A47B-PT", "name": "Baidu: ERNIE 4.5 VL 424B A47B ", - "created": 1751300903, - "description": "ERNIE-4.5-VL-424B-A47B is a multimodal Mixture-of-Experts (MoE) model from Baidu’s ERNIE 4.5 series, featuring 424B total parameters with 47B active per token. It is trained jointly on text and image data using a heterogeneous MoE architecture and modality-isolated routing to enable high-fidelity cross-modal reasoning, image understanding, and long-context generation (up to 131k tokens). Fine-tuned with techniques like SFT, DPO, UPO, and RLVR, this model supports both “thinking” and non-thinking inference modes. 
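The `<instruction>/<code>/<update>` prompt format that both Morph apply models require is mechanical enough to assemble directly; a small helper sketch (the `buildMorphPrompt` name and its arguments are illustrative, not part of this patch):

```ts
// Builds the prompt shape Morph's apply models expect:
// <instruction>{instruction}</instruction>
// <code>{initial_code}</code>
// <update>{edit_snippet}</update>
function buildMorphPrompt(
  instruction: string,
  initialCode: string,
  editSnippet: string,
): string {
  return (
    `<instruction>${instruction}</instruction>\n` +
    `<code>${initialCode}</code>\n` +
    `<update>${editSnippet}</update>`
  );
}
```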
Designed for vision-language tasks in English and Chinese, it is optimized for efficient scaling and can operate under 4-bit/8-bit quantization.", + "description": "ERNIE-4.5-VL-424B-A47B is a multimodal Mixture-of-Experts (MoE) model from Baidu’s ERNIE 4.5 series, featuring 424B total parameters with 47B active per token.", "context_length": 123000, - "architecture": { - "modality": "text+image-\u003Etext", - "input_modalities": ["image", "text"], - "output_modalities": ["text"], - "tokenizer": "Other", - "instruct_type": null - }, - "pricing": { - "prompt": "0.00000042", - "completion": "0.00000125", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 123000, - "max_completion_tokens": 16000, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "include_reasoning", - "max_tokens", - "presence_penalty", - "reasoning", - "repetition_penalty", - "seed", - "stop", - "temperature", - "top_k", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "baidu/ERNIE-4.5-VL-424B-A47B-PT" }, { "id": "baidu/ernie-4.5-300b-a47b", - "canonical_slug": "baidu/ernie-4.5-300b-a47b", - "hugging_face_id": "baidu/ERNIE-4.5-300B-A47B-PT", "name": "Baidu: ERNIE 4.5 300B A47B ", - "created": 1751300139, - "description": "ERNIE-4.5-300B-A47B is a 300B parameter Mixture-of-Experts (MoE) language model developed by Baidu as part of the ERNIE 4.5 series. It activates 47B parameters per token and supports text generation in both English and Chinese. Optimized for high-throughput inference and efficient scaling, it uses a heterogeneous MoE structure with advanced routing and quantization strategies, including FP8 and 2-bit formats. This version is fine-tuned for language-only tasks and supports reasoning, tool parameters, and extended context lengths up to 131k tokens. Suitable for general-purpose LLM applications with high reasoning and throughput demands.", + "description": "ERNIE-4.5-300B-A47B is a 300B parameter Mixture-of-Experts (MoE) language model developed by Baidu as part of the ERNIE 4.5 series.", "context_length": 123000, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Other", - "instruct_type": null - }, - "pricing": { - "prompt": "0.00000028", - "completion": "0.0000011", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 123000, - "max_completion_tokens": 12000, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "max_tokens", - "presence_penalty", - "repetition_penalty", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "top_k", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "baidu/ERNIE-4.5-300B-A47B-PT" }, { "id": "thedrummer/anubis-70b-v1.1", - "canonical_slug": "thedrummer/anubis-70b-v1.1", - "hugging_face_id": "TheDrummer/Anubis-70B-v1.1", "name": "TheDrummer: Anubis 70B V1.1", - "created": 1751208347, - "description": "TheDrummer's Anubis v1.1 is an unaligned, creative Llama 3.3 70B model focused on providing character-driven roleplay & stories. 
It excels at gritty, visceral prose, unique character adherence, and coherent narratives, while maintaining the instruction following Llama 3.3 70B is known for.", + "description": "TheDrummer's Anubis v1.1 is an unaligned, creative Llama 3.3 70B model focused on providing character-driven roleplay & stories.", "context_length": 131072, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Llama3", - "instruct_type": null - }, - "pricing": { - "prompt": "0.00000065", - "completion": "0.000001", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 131072, - "max_completion_tokens": 131072, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "logit_bias", - "max_tokens", - "min_p", - "presence_penalty", - "repetition_penalty", - "seed", - "stop", - "temperature", - "top_k", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "TheDrummer/Anubis-70B-v1.1" }, { "id": "inception/mercury", - "canonical_slug": "inception/mercury", - "hugging_face_id": "", "name": "Inception: Mercury", - "created": 1750973026, - "description": "Mercury is the first diffusion large language model (dLLM). Applying a breakthrough discrete diffusion approach, the model runs 5-10x faster than even speed optimized models like GPT-4.1 Nano and Claude 3.5 Haiku while matching their performance. Mercury's speed enables developers to provide responsive user experiences, including with voice agents, search interfaces, and chatbots. Read more in the [blog post]\n(https://www.inceptionlabs.ai/blog/introducing-mercury) here. ", + "description": "Mercury is the first diffusion large language model (dLLM).", "context_length": 128000, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Other", - "instruct_type": null - }, - "pricing": { - "prompt": "0.00000025", - "completion": "0.000001", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 128000, - "max_completion_tokens": 16384, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "max_tokens", - "presence_penalty", - "response_format", - "stop", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_k", - "top_p" - ], - "default_parameters": { - "temperature": 0, - "top_p": null, - "frequency_penalty": null - } + "hugging_face_id": "" }, { "id": "mistralai/mistral-small-3.2-24b-instruct:free", - "canonical_slug": "mistralai/mistral-small-3.2-24b-instruct-2506", - "hugging_face_id": "mistralai/Mistral-Small-3.2-24B-Instruct-2506", "name": "Mistral: Mistral Small 3.2 24B (free)", - "created": 1750443016, - "description": "Mistral-Small-3.2-24B-Instruct-2506 is an updated 24B parameter model from Mistral optimized for instruction following, repetition reduction, and improved function calling. 
Compared to the 3.1 release, version 3.2 significantly improves accuracy on WildBench and Arena Hard, reduces infinite generations, and delivers gains in tool use and structured output tasks.\n\nIt supports image and text inputs with structured outputs, function/tool calling, and strong performance across coding (HumanEval+, MBPP), STEM (MMLU, MATH, GPQA), and vision benchmarks (ChartQA, DocVQA).", + "description": "Mistral-Small-3.2-24B-Instruct-2506 is an updated 24B parameter model from Mistral optimized for instruction following, repetition reduction, and improved function calling.", "context_length": 131072, - "architecture": { - "modality": "text+image-\u003Etext", - "input_modalities": ["image", "text"], - "output_modalities": ["text"], - "tokenizer": "Mistral", - "instruct_type": null - }, - "pricing": { - "prompt": "0", - "completion": "0", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 131072, - "max_completion_tokens": null, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "max_tokens", - "presence_penalty", - "repetition_penalty", - "seed", - "stop", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_k", - "top_p" - ], - "default_parameters": { - "temperature": 0.3 - } + "hugging_face_id": "mistralai/Mistral-Small-3.2-24B-Instruct-2506" }, { "id": "mistralai/mistral-small-3.2-24b-instruct", - "canonical_slug": "mistralai/mistral-small-3.2-24b-instruct-2506", - "hugging_face_id": "mistralai/Mistral-Small-3.2-24B-Instruct-2506", "name": "Mistral: Mistral Small 3.2 24B", - "created": 1750443016, - "description": "Mistral-Small-3.2-24B-Instruct-2506 is an updated 24B parameter model from Mistral optimized for instruction following, repetition reduction, and improved function calling. 
Compared to the 3.1 release, version 3.2 significantly improves accuracy on WildBench and Arena Hard, reduces infinite generations, and delivers gains in tool use and structured output tasks.\n\nIt supports image and text inputs with structured outputs, function/tool calling, and strong performance across coding (HumanEval+, MBPP), STEM (MMLU, MATH, GPQA), and vision benchmarks (ChartQA, DocVQA).", + "description": "Mistral-Small-3.2-24B-Instruct-2506 is an updated 24B parameter model from Mistral optimized for instruction following, repetition reduction, and improved function calling.", "context_length": 131072, - "architecture": { - "modality": "text+image-\u003Etext", - "input_modalities": ["image", "text"], - "output_modalities": ["text"], - "tokenizer": "Mistral", - "instruct_type": null - }, - "pricing": { - "prompt": "0.00000006", - "completion": "0.00000018", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 131072, - "max_completion_tokens": 131072, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "logit_bias", - "max_tokens", - "min_p", - "presence_penalty", - "repetition_penalty", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_k", - "top_p" - ], - "default_parameters": { - "temperature": 0.3 - } + "hugging_face_id": "mistralai/Mistral-Small-3.2-24B-Instruct-2506" }, { "id": "minimax/minimax-m1", - "canonical_slug": "minimax/minimax-m1", - "hugging_face_id": "", "name": "MiniMax: MiniMax M1", - "created": 1750200414, - "description": "MiniMax-M1 is a large-scale, open-weight reasoning model designed for extended context and high-efficiency inference. It leverages a hybrid Mixture-of-Experts (MoE) architecture paired with a custom \"lightning attention\" mechanism, allowing it to process long sequences—up to 1 million tokens—while maintaining competitive FLOP efficiency. With 456 billion total parameters and 45.9B active per token, this variant is optimized for complex, multi-step reasoning tasks.\n\nTrained via a custom reinforcement learning pipeline (CISPO), M1 excels in long-context understanding, software engineering, agentic tool use, and mathematical reasoning. 
Benchmarks show strong performance across FullStackBench, SWE-bench, MATH, GPQA, and TAU-Bench, often outperforming other open models like DeepSeek R1 and Qwen3-235B.", + "description": "MiniMax-M1 is a large-scale, open-weight reasoning model designed for extended context and high-efficiency inference.", "context_length": 1000000, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Other", - "instruct_type": null - }, - "pricing": { - "prompt": "0.0000004", - "completion": "0.0000022", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 1000000, - "max_completion_tokens": 40000, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "include_reasoning", - "max_tokens", - "presence_penalty", - "reasoning", - "repetition_penalty", - "seed", - "stop", - "temperature", - "tool_choice", - "tools", - "top_k", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "" }, { "id": "google/gemini-2.5-flash-lite-preview-06-17", - "canonical_slug": "google/gemini-2.5-flash-lite-preview-06-17", - "hugging_face_id": "", "name": "Google: Gemini 2.5 Flash Lite Preview 06-17", - "created": 1750173831, - "description": "Gemini 2.5 Flash-Lite is a lightweight reasoning model in the Gemini 2.5 family, optimized for ultra-low latency and cost efficiency. It offers improved throughput, faster token generation, and better performance across common benchmarks compared to earlier Flash models. By default, \"thinking\" (i.e. multi-pass reasoning) is disabled to prioritize speed, but developers can enable it via the [Reasoning API parameter](https://openrouter.ai/docs/use-cases/reasoning-tokens) to selectively trade off cost for intelligence. ", + "description": "Gemini 2.5 Flash-Lite is a lightweight reasoning model in the Gemini 2.5 family, optimized for ultra-low latency and cost efficiency.", "context_length": 1048576, - "architecture": { - "modality": "text+image-\u003Etext", - "input_modalities": ["file", "image", "text", "audio"], - "output_modalities": ["text"], - "tokenizer": "Gemini", - "instruct_type": null - }, - "pricing": { - "prompt": "0.0000001", - "completion": "0.0000004", - "request": "0", - "image": "0", - "audio": "0.0000003", - "web_search": "0", - "internal_reasoning": "0", - "input_cache_read": "0.000000025", - "input_cache_write": "0.0000001833" - }, - "top_provider": { - "context_length": 1048576, - "max_completion_tokens": 65535, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "include_reasoning", - "max_tokens", - "reasoning", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "" }, { "id": "google/gemini-2.5-flash", - "canonical_slug": "google/gemini-2.5-flash", - "hugging_face_id": "", "name": "Google: Gemini 2.5 Flash", - "created": 1750172488, - "description": "Gemini 2.5 Flash is Google's state-of-the-art workhorse model, specifically designed for advanced reasoning, coding, mathematics, and scientific tasks. It includes built-in \"thinking\" capabilities, enabling it to provide responses with greater accuracy and nuanced context handling. 
\n\nAdditionally, Gemini 2.5 Flash is configurable through the \"max tokens for reasoning\" parameter, as described in the documentation (https://openrouter.ai/docs/use-cases/reasoning-tokens#max-tokens-for-reasoning).", + "description": "Gemini 2.5 Flash is Google's state-of-the-art workhorse model, specifically designed for advanced reasoning, coding, mathematics, and scientific tasks.", "context_length": 1048576, - "architecture": { - "modality": "text+image-\u003Etext", - "input_modalities": ["file", "image", "text", "audio", "video"], - "output_modalities": ["text"], - "tokenizer": "Gemini", - "instruct_type": null - }, - "pricing": { - "prompt": "0.0000003", - "completion": "0.0000025", - "request": "0", - "image": "0.001238", - "web_search": "0", - "internal_reasoning": "0", - "input_cache_read": "0.00000003", - "input_cache_write": "0.0000003833" - }, - "top_provider": { - "context_length": 1048576, - "max_completion_tokens": 65535, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "include_reasoning", - "max_tokens", - "reasoning", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_p" - ], - "default_parameters": { - "temperature": null, - "top_p": null, - "frequency_penalty": null - } + "hugging_face_id": "" }, { "id": "google/gemini-2.5-pro", - "canonical_slug": "google/gemini-2.5-pro", - "hugging_face_id": "", "name": "Google: Gemini 2.5 Pro", - "created": 1750169544, - "description": "Gemini 2.5 Pro is Google’s state-of-the-art AI model designed for advanced reasoning, coding, mathematics, and scientific tasks. It employs “thinking” capabilities, enabling it to reason through responses with enhanced accuracy and nuanced context handling. Gemini 2.5 Pro achieves top-tier performance on multiple benchmarks, including first-place positioning on the LMArena leaderboard, reflecting superior human-preference alignment and complex problem-solving abilities.", + "description": "Gemini 2.5 Pro is Google’s state-of-the-art AI model designed for advanced reasoning, coding, mathematics, and scientific tasks.", "context_length": 1048576, - "architecture": { - "modality": "text+image-\u003Etext", - "input_modalities": ["text", "image", "file", "audio", "video"], - "output_modalities": ["text"], - "tokenizer": "Gemini", - "instruct_type": null - }, - "pricing": { - "prompt": "0.00000125", - "completion": "0.00001", - "request": "0", - "image": "0.00516", - "web_search": "0", - "internal_reasoning": "0", - "input_cache_read": "0.000000125", - "input_cache_write": "0.000001625" - }, - "top_provider": { - "context_length": 1048576, - "max_completion_tokens": 65536, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "include_reasoning", - "max_tokens", - "reasoning", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_p" - ], - "default_parameters": { - "temperature": null, - "top_p": null, - "frequency_penalty": null - } + "hugging_face_id": "" }, { "id": "moonshotai/kimi-dev-72b", - "canonical_slug": "moonshotai/kimi-dev-72b", - "hugging_face_id": "moonshotai/Kimi-Dev-72B", "name": "MoonshotAI: Kimi Dev 72B", - "created": 1750115909, - "description": "Kimi-Dev-72B is an open-source large language model fine-tuned for software engineering and issue resolution tasks. 
Based on Qwen2.5-72B, it is optimized using large-scale reinforcement learning that applies code patches in real repositories and validates them via full test suite execution—rewarding only correct, robust completions. The model achieves 60.4% on SWE-bench Verified, setting a new benchmark among open-source models for software bug fixing and code reasoning.", + "description": "Kimi-Dev-72B is an open-source large language model fine-tuned for software engineering and issue resolution tasks.", "context_length": 131072, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Other", - "instruct_type": null - }, - "pricing": { - "prompt": "0.00000029", - "completion": "0.00000115", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 131072, - "max_completion_tokens": 131072, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "include_reasoning", - "reasoning", - "response_format", - "structured_outputs", - "temperature", - "top_k", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "moonshotai/Kimi-Dev-72B" }, { "id": "openai/o3-pro", - "canonical_slug": "openai/o3-pro-2025-06-10", - "hugging_face_id": "", "name": "OpenAI: o3 Pro", - "created": 1749598352, - "description": "The o-series of models are trained with reinforcement learning to think before they answer and perform complex reasoning. The o3-pro model uses more compute to think harder and provide consistently better answers.\n\nNote that BYOK is required for this model. Set up here: https://openrouter.ai/settings/integrations", + "description": "The o-series of models are trained with reinforcement learning to think before they answer and perform complex reasoning.", "context_length": 200000, - "architecture": { - "modality": "text+image-\u003Etext", - "input_modalities": ["text", "file", "image"], - "output_modalities": ["text"], - "tokenizer": "GPT", - "instruct_type": null - }, - "pricing": { - "prompt": "0.00002", - "completion": "0.00008", - "request": "0", - "image": "0.0153", - "web_search": "0.01", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 200000, - "max_completion_tokens": 100000, - "is_moderated": true - }, - "per_request_limits": null, - "supported_parameters": [ - "include_reasoning", - "max_tokens", - "reasoning", - "response_format", - "seed", - "structured_outputs", - "tool_choice", - "tools" - ], - "default_parameters": {} + "hugging_face_id": "" }, { "id": "x-ai/grok-3-mini", - "canonical_slug": "x-ai/grok-3-mini", - "hugging_face_id": "", "name": "xAI: Grok 3 Mini", - "created": 1749583245, - "description": "A lightweight model that thinks before responding. Fast, smart, and great for logic-based tasks that do not require deep domain knowledge. 
The raw thinking traces are accessible.", + "description": "A lightweight model that thinks before responding.", "context_length": 131072, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Grok", - "instruct_type": null - }, - "pricing": { - "prompt": "0.0000003", - "completion": "0.0000005", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0", - "input_cache_read": "0.000000075" - }, - "top_provider": { - "context_length": 131072, - "max_completion_tokens": null, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "include_reasoning", - "logprobs", - "max_tokens", - "reasoning", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_logprobs", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "" }, { "id": "x-ai/grok-3", - "canonical_slug": "x-ai/grok-3", - "hugging_face_id": "", "name": "xAI: Grok 3", - "created": 1749582908, - "description": "Grok 3 is the latest model from xAI. It's their flagship model that excels at enterprise use cases like data extraction, coding, and text summarization. Possesses deep domain knowledge in finance, healthcare, law, and science.\n\n", + "description": "Grok 3 is the latest model from xAI.", "context_length": 131072, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Grok", - "instruct_type": null - }, - "pricing": { - "prompt": "0.000003", - "completion": "0.000015", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0", - "input_cache_read": "0.00000075" - }, - "top_provider": { - "context_length": 131072, - "max_completion_tokens": null, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "logprobs", - "max_tokens", - "presence_penalty", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_logprobs", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "" }, { "id": "mistralai/magistral-small-2506", - "canonical_slug": "mistralai/magistral-small-2506", - "hugging_face_id": "mistralai/Magistral-Small-2506", "name": "Mistral: Magistral Small 2506", - "created": 1749569561, - "description": "Magistral Small is a 24B parameter instruction-tuned model based on Mistral-Small-3.1 (2503), enhanced through supervised fine-tuning on traces from Magistral Medium and further refined via reinforcement learning. 
It is optimized for reasoning and supports a wide multilingual range, including over 20 languages.", + "description": "Magistral Small is a 24B parameter instruction-tuned model based on Mistral-Small-3.1 (2503), enhanced through supervised fine-tuning on traces from Magistral Medium and further refined via reinforcement learning.", "context_length": 40000, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Mistral", - "instruct_type": null - }, - "pricing": { - "prompt": "0.0000005", - "completion": "0.0000015", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 40000, - "max_completion_tokens": 40000, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "include_reasoning", - "max_tokens", - "presence_penalty", - "reasoning", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_p" - ], - "default_parameters": { - "temperature": 0.3 - } + "hugging_face_id": "mistralai/Magistral-Small-2506" }, { "id": "mistralai/magistral-medium-2506:thinking", - "canonical_slug": "mistralai/magistral-medium-2506", - "hugging_face_id": "", "name": "Mistral: Magistral Medium 2506 (thinking)", - "created": 1749354054, - "description": "Magistral is Mistral's first reasoning model. It is ideal for general purpose use requiring longer thought processing and better accuracy than with non-reasoning LLMs. From legal research and financial forecasting to software development and creative storytelling — this model solves multi-step challenges where transparency and precision are critical.", + "description": "Magistral is Mistral's first reasoning model.", "context_length": 40960, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Mistral", - "instruct_type": null - }, - "pricing": { - "prompt": "0.000002", - "completion": "0.000005", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 40960, - "max_completion_tokens": 40000, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "include_reasoning", - "max_tokens", - "presence_penalty", - "reasoning", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_p" - ], - "default_parameters": { - "temperature": 0.3 - } + "hugging_face_id": "" }, { "id": "mistralai/magistral-medium-2506", - "canonical_slug": "mistralai/magistral-medium-2506", - "hugging_face_id": "", "name": "Mistral: Magistral Medium 2506", - "created": 1749354054, - "description": "Magistral is Mistral's first reasoning model. It is ideal for general purpose use requiring longer thought processing and better accuracy than with non-reasoning LLMs. 
From legal research and financial forecasting to software development and creative storytelling — this model solves multi-step challenges where transparency and precision are critical.", + "description": "Magistral is Mistral's first reasoning model.", "context_length": 40960, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Mistral", - "instruct_type": null - }, - "pricing": { - "prompt": "0.000002", - "completion": "0.000005", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 40960, - "max_completion_tokens": 40000, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "include_reasoning", - "max_tokens", - "presence_penalty", - "reasoning", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_p" - ], - "default_parameters": { - "temperature": 0.3 - } + "hugging_face_id": "" }, { "id": "google/gemini-2.5-pro-preview", - "canonical_slug": "google/gemini-2.5-pro-preview-06-05", - "hugging_face_id": "", "name": "Google: Gemini 2.5 Pro Preview 06-05", - "created": 1749137257, - "description": "Gemini 2.5 Pro is Google’s state-of-the-art AI model designed for advanced reasoning, coding, mathematics, and scientific tasks. It employs “thinking” capabilities, enabling it to reason through responses with enhanced accuracy and nuanced context handling. Gemini 2.5 Pro achieves top-tier performance on multiple benchmarks, including first-place positioning on the LMArena leaderboard, reflecting superior human-preference alignment and complex problem-solving abilities.\n", + "description": "Gemini 2.5 Pro is Google’s state-of-the-art AI model designed for advanced reasoning, coding, mathematics, and scientific tasks.", "context_length": 1048576, - "architecture": { - "modality": "text+image-\u003Etext", - "input_modalities": ["file", "image", "text", "audio"], - "output_modalities": ["text"], - "tokenizer": "Gemini", - "instruct_type": null - }, - "pricing": { - "prompt": "0.00000125", - "completion": "0.00001", - "request": "0", - "image": "0.00516", - "web_search": "0", - "internal_reasoning": "0", - "input_cache_read": "0.00000031", - "input_cache_write": "0.000001625" - }, - "top_provider": { - "context_length": 1048576, - "max_completion_tokens": 65536, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "include_reasoning", - "max_tokens", - "reasoning", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "" }, { "id": "deepseek/deepseek-r1-0528-qwen3-8b:free", - "canonical_slug": "deepseek/deepseek-r1-0528-qwen3-8b", - "hugging_face_id": "deepseek-ai/deepseek-r1-0528-qwen3-8b", "name": "DeepSeek: DeepSeek R1 0528 Qwen3 8B (free)", - "created": 1748538543, - "description": "DeepSeek-R1-0528 is a lightly upgraded release of DeepSeek R1 that taps more compute and smarter post-training tricks, pushing its reasoning and inference to the brink of flagship models like O3 and Gemini 2.5 Pro.\nIt now tops math, programming, and logic leaderboards, showcasing a step-change in depth-of-thought.\nThe distilled variant, DeepSeek-R1-0528-Qwen3-8B, transfers this chain-of-thought into an 8 B-parameter form, beating standard Qwen3 8B by +10 pp and tying the 235 B “thinking” 
giant on AIME 2024.", + "description": "DeepSeek-R1-0528 is a lightly upgraded release of DeepSeek R1 that taps more compute and smarter post-training tricks, pushing its reasoning and inference to the brink of flagship models like O3 and Gemini 2.5 Pro.", "context_length": 131072, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Qwen", - "instruct_type": "deepseek-r1" - }, - "pricing": { - "prompt": "0", - "completion": "0", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 131072, - "max_completion_tokens": null, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "include_reasoning", - "max_tokens", - "presence_penalty", - "reasoning", - "repetition_penalty", - "seed", - "stop", - "temperature", - "top_k", - "top_p" - ], - "default_parameters": { - "temperature": null, - "top_p": null, - "frequency_penalty": null - } + "hugging_face_id": "deepseek-ai/deepseek-r1-0528-qwen3-8b" }, { "id": "deepseek/deepseek-r1-0528-qwen3-8b", - "canonical_slug": "deepseek/deepseek-r1-0528-qwen3-8b", - "hugging_face_id": "deepseek-ai/deepseek-r1-0528-qwen3-8b", "name": "DeepSeek: DeepSeek R1 0528 Qwen3 8B", - "created": 1748538543, - "description": "DeepSeek-R1-0528 is a lightly upgraded release of DeepSeek R1 that taps more compute and smarter post-training tricks, pushing its reasoning and inference to the brink of flagship models like O3 and Gemini 2.5 Pro.\nIt now tops math, programming, and logic leaderboards, showcasing a step-change in depth-of-thought.\nThe distilled variant, DeepSeek-R1-0528-Qwen3-8B, transfers this chain-of-thought into an 8 B-parameter form, beating standard Qwen3 8B by +10 pp and tying the 235 B “thinking” giant on AIME 2024.", + "description": "DeepSeek-R1-0528 is a lightly upgraded release of DeepSeek R1 that taps more compute and smarter post-training tricks, pushing its reasoning and inference to the brink of flagship models like O3 and Gemini 2.5 Pro.", "context_length": 32768, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Qwen", - "instruct_type": "deepseek-r1" - }, - "pricing": { - "prompt": "0.00000002", - "completion": "0.0000001", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 32768, - "max_completion_tokens": 32768, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "include_reasoning", - "max_tokens", - "presence_penalty", - "reasoning", - "repetition_penalty", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "top_k", - "top_p" - ], - "default_parameters": { - "temperature": null, - "top_p": null, - "frequency_penalty": null - } + "hugging_face_id": "deepseek-ai/deepseek-r1-0528-qwen3-8b" }, { "id": "deepseek/deepseek-r1-0528:free", - "canonical_slug": "deepseek/deepseek-r1-0528", - "hugging_face_id": "deepseek-ai/DeepSeek-R1-0528", "name": "DeepSeek: R1 0528 (free)", - "created": 1748455170, - "description": "May 28th update to the [original DeepSeek R1](/deepseek/deepseek-r1) Performance on par with [OpenAI o1](/openai/o1), but open-sourced and with fully open reasoning tokens. 
It's 671B parameters in size, with 37B active in an inference pass.\n\nFully open-source model.", + "description": "May 28th update to the [original DeepSeek R1](/deepseek/deepseek-r1). Performance on par with [OpenAI o1](/openai/o1), but open-sourced and with fully open reasoning tokens.", "context_length": 163840, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "DeepSeek", - "instruct_type": "deepseek-r1" - }, - "pricing": { - "prompt": "0", - "completion": "0", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 163840, - "max_completion_tokens": null, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "include_reasoning", - "max_tokens", - "presence_penalty", - "reasoning", - "repetition_penalty", - "seed", - "stop", - "temperature", - "top_k", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "deepseek-ai/DeepSeek-R1-0528" }, { "id": "deepseek/deepseek-r1-0528", - "canonical_slug": "deepseek/deepseek-r1-0528", - "hugging_face_id": "deepseek-ai/DeepSeek-R1-0528", "name": "DeepSeek: R1 0528", - "created": 1748455170, - "description": "May 28th update to the [original DeepSeek R1](/deepseek/deepseek-r1) Performance on par with [OpenAI o1](/openai/o1), but open-sourced and with fully open reasoning tokens. It's 671B parameters in size, with 37B active in an inference pass.\n\nFully open-source model.", + "description": "May 28th update to the [original DeepSeek R1](/deepseek/deepseek-r1). Performance on par with [OpenAI o1](/openai/o1), but open-sourced and with fully open reasoning tokens.", "context_length": 163840, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "DeepSeek", - "instruct_type": "deepseek-r1" - }, - "pricing": { - "prompt": "0.0000004", - "completion": "0.00000175", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 163840, - "max_completion_tokens": 163840, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "include_reasoning", - "logit_bias", - "logprobs", - "max_tokens", - "min_p", - "presence_penalty", - "reasoning", - "repetition_penalty", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_k", - "top_logprobs", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "deepseek-ai/DeepSeek-R1-0528" }, { "id": "anthropic/claude-opus-4", - "canonical_slug": "anthropic/claude-4-opus-20250522", - "hugging_face_id": "", "name": "Anthropic: Claude Opus 4", - "created": 1747931245, - "description": "Claude Opus 4 is benchmarked as the world’s best coding model, at time of release, bringing sustained performance on complex, long-running tasks and agent workflows. It sets new benchmarks in software engineering, achieving leading results on SWE-bench (72.5%) and Terminal-bench (43.2%). Opus 4 supports extended, agentic workflows, handling thousands of task steps continuously for hours without degradation. 
\n\nRead more at the [blog post here](https://www.anthropic.com/news/claude-4)", + "description": "Claude Opus 4 is benchmarked as the world’s best coding model, at time of release, bringing sustained performance on complex, long-running tasks and agent workflows.", "context_length": 200000, - "architecture": { - "modality": "text+image-\u003Etext", - "input_modalities": ["image", "text", "file"], - "output_modalities": ["text"], - "tokenizer": "Claude", - "instruct_type": null - }, - "pricing": { - "prompt": "0.000015", - "completion": "0.000075", - "request": "0", - "image": "0.024", - "web_search": "0", - "internal_reasoning": "0", - "input_cache_read": "0.0000015", - "input_cache_write": "0.00001875" - }, - "top_provider": { - "context_length": 200000, - "max_completion_tokens": 32000, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "include_reasoning", - "max_tokens", - "reasoning", - "stop", - "temperature", - "tool_choice", - "tools", - "top_k", - "top_p" - ], - "default_parameters": { - "temperature": null, - "top_p": null, - "frequency_penalty": null - } + "hugging_face_id": "" }, { "id": "anthropic/claude-sonnet-4", - "canonical_slug": "anthropic/claude-4-sonnet-20250522", - "hugging_face_id": "", "name": "Anthropic: Claude Sonnet 4", - "created": 1747930371, - "description": "Claude Sonnet 4 significantly enhances the capabilities of its predecessor, Sonnet 3.7, excelling in both coding and reasoning tasks with improved precision and controllability. Achieving state-of-the-art performance on SWE-bench (72.7%), Sonnet 4 balances capability and computational efficiency, making it suitable for a broad range of applications from routine coding tasks to complex software development projects. Key enhancements include improved autonomous codebase navigation, reduced error rates in agent-driven workflows, and increased reliability in following intricate instructions. 
Sonnet 4 is optimized for practical everyday use, providing advanced reasoning capabilities while maintaining efficiency and responsiveness in diverse internal and external scenarios.\n\nRead more at the [blog post here](https://www.anthropic.com/news/claude-4)", + "description": "Claude Sonnet 4 significantly enhances the capabilities of its predecessor, Sonnet 3.7, excelling in both coding and reasoning tasks with improved precision and controllability.", "context_length": 1000000, - "architecture": { - "modality": "text+image-\u003Etext", - "input_modalities": ["image", "text", "file"], - "output_modalities": ["text"], - "tokenizer": "Claude", - "instruct_type": null - }, - "pricing": { - "prompt": "0.000003", - "completion": "0.000015", - "request": "0", - "image": "0.0048", - "web_search": "0", - "internal_reasoning": "0", - "input_cache_read": "0.0000003", - "input_cache_write": "0.00000375" - }, - "top_provider": { - "context_length": 1000000, - "max_completion_tokens": 64000, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "include_reasoning", - "max_tokens", - "reasoning", - "stop", - "temperature", - "tool_choice", - "tools", - "top_k", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "" }, { "id": "mistralai/devstral-small-2505", - "canonical_slug": "mistralai/devstral-small-2505", - "hugging_face_id": "mistralai/Devstral-Small-2505", "name": "Mistral: Devstral Small 2505", - "created": 1747837379, - "description": "Devstral-Small-2505 is a 24B parameter agentic LLM fine-tuned from Mistral-Small-3.1, jointly developed by Mistral AI and All Hands AI for advanced software engineering tasks. It is optimized for codebase exploration, multi-file editing, and integration into coding agents, achieving state-of-the-art results on SWE-Bench Verified (46.8%).\n\nDevstral supports a 128k context window and uses a custom Tekken tokenizer. It is text-only, with the vision encoder removed, and is suitable for local deployment on high-end consumer hardware (e.g., RTX 4090, 32GB RAM Macs). Devstral is best used in agentic workflows via the OpenHands scaffold and is compatible with inference frameworks like vLLM, Transformers, and Ollama. 
It is released under the Apache 2.0 license.", + "description": "Devstral-Small-2505 is a 24B parameter agentic LLM fine-tuned from Mistral-Small-3.1, jointly developed by Mistral AI and All Hands AI for advanced software engineering tasks.", "context_length": 128000, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Mistral", - "instruct_type": null - }, - "pricing": { - "prompt": "0.00000006", - "completion": "0.00000012", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 128000, - "max_completion_tokens": null, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "max_tokens", - "min_p", - "presence_penalty", - "repetition_penalty", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_k", - "top_p" - ], - "default_parameters": { - "temperature": 0.3 - } + "hugging_face_id": "mistralai/Devstral-Small-2505" }, { "id": "google/gemma-3n-e4b-it:free", - "canonical_slug": "google/gemma-3n-e4b-it", - "hugging_face_id": "google/gemma-3n-E4B-it", "name": "Google: Gemma 3n 4B (free)", - "created": 1747776824, - "description": "Gemma 3n E4B-it is optimized for efficient execution on mobile and low-resource devices, such as phones, laptops, and tablets. It supports multimodal inputs—including text, visual data, and audio—enabling diverse tasks such as text generation, speech recognition, translation, and image analysis. Leveraging innovations like Per-Layer Embedding (PLE) caching and the MatFormer architecture, Gemma 3n dynamically manages memory usage and computational load by selectively activating model parameters, significantly reducing runtime resource requirements.\n\nThis model supports a wide linguistic range (trained in over 140 languages) and features a flexible 32K token context window. Gemma 3n can selectively load parameters, optimizing memory and computational efficiency based on the task or device capabilities, making it well-suited for privacy-focused, offline-capable applications and on-device AI solutions. [Read more in the blog post](https://developers.googleblog.com/en/introducing-gemma-3n/)", + "description": "Gemma 3n E4B-it is optimized for efficient execution on mobile and low-resource devices, such as phones, laptops, and tablets.", "context_length": 8192, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Other", - "instruct_type": null - }, - "pricing": { - "prompt": "0", - "completion": "0", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 8192, - "max_completion_tokens": 2048, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "max_tokens", - "presence_penalty", - "response_format", - "seed", - "stop", - "temperature", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "google/gemma-3n-E4B-it" }, { "id": "google/gemma-3n-e4b-it", - "canonical_slug": "google/gemma-3n-e4b-it", - "hugging_face_id": "google/gemma-3n-E4B-it", "name": "Google: Gemma 3n 4B", - "created": 1747776824, - "description": "Gemma 3n E4B-it is optimized for efficient execution on mobile and low-resource devices, such as phones, laptops, and tablets. 
It supports multimodal inputs—including text, visual data, and audio—enabling diverse tasks such as text generation, speech recognition, translation, and image analysis. Leveraging innovations like Per-Layer Embedding (PLE) caching and the MatFormer architecture, Gemma 3n dynamically manages memory usage and computational load by selectively activating model parameters, significantly reducing runtime resource requirements.\n\nThis model supports a wide linguistic range (trained in over 140 languages) and features a flexible 32K token context window. Gemma 3n can selectively load parameters, optimizing memory and computational efficiency based on the task or device capabilities, making it well-suited for privacy-focused, offline-capable applications and on-device AI solutions. [Read more in the blog post](https://developers.googleblog.com/en/introducing-gemma-3n/)", + "description": "Gemma 3n E4B-it is optimized for efficient execution on mobile and low-resource devices, such as phones, laptops, and tablets.", "context_length": 32768, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Other", - "instruct_type": null - }, - "pricing": { - "prompt": "0.00000002", - "completion": "0.00000004", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 32768, - "max_completion_tokens": null, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "logit_bias", - "max_tokens", - "min_p", - "presence_penalty", - "repetition_penalty", - "stop", - "temperature", - "top_k", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "google/gemma-3n-E4B-it" }, { "id": "openai/codex-mini", - "canonical_slug": "openai/codex-mini", - "hugging_face_id": "", "name": "OpenAI: Codex Mini", - "created": 1747409761, - "description": "codex-mini-latest is a fine-tuned version of o4-mini specifically for use in Codex CLI. 
For direct use in the API, we recommend starting with gpt-4.1.", + "description": "codex-mini-latest is a fine-tuned version of o4-mini specifically for use in Codex CLI.", "context_length": 200000, - "architecture": { - "modality": "text+image-\u003Etext", - "input_modalities": ["image", "text"], - "output_modalities": ["text"], - "tokenizer": "GPT", - "instruct_type": null - }, - "pricing": { - "prompt": "0.0000015", - "completion": "0.000006", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0", - "input_cache_read": "0.000000375" - }, - "top_provider": { - "context_length": 200000, - "max_completion_tokens": 100000, - "is_moderated": true - }, - "per_request_limits": null, - "supported_parameters": [ - "include_reasoning", - "max_tokens", - "reasoning", - "response_format", - "seed", - "structured_outputs", - "tool_choice", - "tools" - ], - "default_parameters": {} + "hugging_face_id": "" }, { "id": "meta-llama/llama-3.3-8b-instruct:free", - "canonical_slug": "meta-llama/llama-3.3-8b-instruct", - "hugging_face_id": "", "name": "Meta: Llama 3.3 8B Instruct (free)", - "created": 1747230154, "description": "A lightweight and ultra-fast variant of Llama 3.3 70B, for use when quick response times are needed most.", "context_length": 128000, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Llama3", - "instruct_type": null - }, - "pricing": { - "prompt": "0", - "completion": "0", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 128000, - "max_completion_tokens": 4028, - "is_moderated": true - }, - "per_request_limits": null, - "supported_parameters": [ - "max_tokens", - "repetition_penalty", - "response_format", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_k", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "" }, { "id": "nousresearch/deephermes-3-mistral-24b-preview", - "canonical_slug": "nousresearch/deephermes-3-mistral-24b-preview", - "hugging_face_id": "NousResearch/DeepHermes-3-Mistral-24B-Preview", "name": "Nous: DeepHermes 3 Mistral 24B Preview", - "created": 1746830904, - "description": "DeepHermes 3 (Mistral 24B Preview) is an instruction-tuned language model by Nous Research based on Mistral-Small-24B, designed for chat, function calling, and advanced multi-turn reasoning. It introduces a dual-mode system that toggles between intuitive chat responses and structured “deep reasoning” mode using special system prompts. Fine-tuned via distillation from R1, it supports structured output (JSON mode) and function call syntax for agent-based applications.\n\nDeepHermes 3 supports a **reasoning toggle via system prompt**, allowing users to switch between fast, intuitive responses and deliberate, multi-step reasoning. When activated with the following specific system instruction, the model enters a *\"deep thinking\"* mode—generating extended chains of thought wrapped in `\u003Cthink\u003E\u003C/think\u003E` tags before delivering a final answer. \n\nSystem Prompt: You are a deep thinking AI, you may use extremely long chains of thought to deeply consider the problem and deliberate with yourself via systematic reasoning processes to help come to a correct solution prior to answering. 
You should enclose your thoughts and internal monologue inside \u003Cthink\u003E \u003C/think\u003E tags, and then provide your solution or response to the problem.\n", + "description": "DeepHermes 3 (Mistral 24B Preview) is an instruction-tuned language model by Nous Research based on Mistral-Small-24B, designed for chat, function calling, and advanced multi-turn reasoning.", "context_length": 32768, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Other", - "instruct_type": null - }, - "pricing": { - "prompt": "0.00000015", - "completion": "0.00000059", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 32768, - "max_completion_tokens": 32768, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "include_reasoning", - "max_tokens", - "presence_penalty", - "reasoning", - "repetition_penalty", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_k", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "NousResearch/DeepHermes-3-Mistral-24B-Preview" }, { "id": "mistralai/mistral-medium-3", - "canonical_slug": "mistralai/mistral-medium-3", - "hugging_face_id": "", "name": "Mistral: Mistral Medium 3", - "created": 1746627341, - "description": "Mistral Medium 3 is a high-performance enterprise-grade language model designed to deliver frontier-level capabilities at significantly reduced operational cost. It balances state-of-the-art reasoning and multimodal performance with 8× lower cost compared to traditional large models, making it suitable for scalable deployments across professional and industrial use cases.\n\nThe model excels in domains such as coding, STEM reasoning, and enterprise adaptation. It supports hybrid, on-prem, and in-VPC deployments and is optimized for integration into custom workflows. 
Mistral Medium 3 offers competitive accuracy relative to larger models like Claude Sonnet 3.5/3.7, Llama 4 Maverick, and Command R+, while maintaining broad compatibility across cloud environments.", + "description": "Mistral Medium 3 is a high-performance enterprise-grade language model designed to deliver frontier-level capabilities at significantly reduced operational cost.", "context_length": 131072, - "architecture": { - "modality": "text+image-\u003Etext", - "input_modalities": ["text", "image"], - "output_modalities": ["text"], - "tokenizer": "Mistral", - "instruct_type": null - }, - "pricing": { - "prompt": "0.0000004", - "completion": "0.000002", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 131072, - "max_completion_tokens": null, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "max_tokens", - "presence_penalty", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_p" - ], - "default_parameters": { - "temperature": 0.3 - } + "hugging_face_id": "" }, { "id": "google/gemini-2.5-pro-preview-05-06", - "canonical_slug": "google/gemini-2.5-pro-preview-03-25", - "hugging_face_id": "", "name": "Google: Gemini 2.5 Pro Preview 05-06", - "created": 1746578513, - "description": "Gemini 2.5 Pro is Google’s state-of-the-art AI model designed for advanced reasoning, coding, mathematics, and scientific tasks. It employs “thinking” capabilities, enabling it to reason through responses with enhanced accuracy and nuanced context handling. Gemini 2.5 Pro achieves top-tier performance on multiple benchmarks, including first-place positioning on the LMArena leaderboard, reflecting superior human-preference alignment and complex problem-solving abilities.", + "description": "Gemini 2.5 Pro is Google’s state-of-the-art AI model designed for advanced reasoning, coding, mathematics, and scientific tasks.", "context_length": 1048576, - "architecture": { - "modality": "text+image-\u003Etext", - "input_modalities": ["text", "image", "file", "audio", "video"], - "output_modalities": ["text"], - "tokenizer": "Gemini", - "instruct_type": null - }, - "pricing": { - "prompt": "0.00000125", - "completion": "0.00001", - "request": "0", - "image": "0.00516", - "web_search": "0", - "internal_reasoning": "0", - "input_cache_read": "0.00000031", - "input_cache_write": "0.000001625" - }, - "top_provider": { - "context_length": 1048576, - "max_completion_tokens": 65535, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "include_reasoning", - "max_tokens", - "reasoning", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_p" - ], - "default_parameters": { - "temperature": null, - "top_p": null, - "frequency_penalty": null - } + "hugging_face_id": "" }, { "id": "arcee-ai/spotlight", - "canonical_slug": "arcee-ai/spotlight", - "hugging_face_id": "", "name": "Arcee AI: Spotlight", - "created": 1746481552, - "description": "Spotlight is a 7‑billion‑parameter vision‑language model derived from Qwen 2.5‑VL and fine‑tuned by Arcee AI for tight image‑text grounding tasks. It offers a 32 k‑token context window, enabling rich multimodal conversations that combine lengthy documents with one or more images. 
Training emphasized fast inference on consumer GPUs while retaining strong captioning, visual‐question‑answering, and diagram‑analysis accuracy. As a result, Spotlight slots neatly into agent workflows where screenshots, charts or UI mock‑ups need to be interpreted on the fly. Early benchmarks show it matching or out‑scoring larger VLMs such as LLaVA‑1.6 13 B on popular VQA and POPE alignment tests. ", + "description": "Spotlight is a 7‑billion‑parameter vision‑language model derived from Qwen 2.5‑VL and fine‑tuned by Arcee AI for tight image‑text grounding tasks.", "context_length": 131072, - "architecture": { - "modality": "text+image-\u003Etext", - "input_modalities": ["image", "text"], - "output_modalities": ["text"], - "tokenizer": "Other", - "instruct_type": null - }, - "pricing": { - "prompt": "0.00000018", - "completion": "0.00000018", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 131072, - "max_completion_tokens": 65537, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "logit_bias", - "max_tokens", - "min_p", - "presence_penalty", - "repetition_penalty", - "stop", - "temperature", - "top_k", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "" }, { "id": "arcee-ai/maestro-reasoning", - "canonical_slug": "arcee-ai/maestro-reasoning", - "hugging_face_id": "", "name": "Arcee AI: Maestro Reasoning", - "created": 1746481269, - "description": "Maestro Reasoning is Arcee's flagship analysis model: a 32 B‑parameter derivative of Qwen 2.5‑32 B tuned with DPO and chain‑of‑thought RL for step‑by‑step logic. Compared to the earlier 7 B preview, the production 32 B release widens the context window to 128 k tokens and doubles pass‑rate on MATH and GSM‑8K, while also lifting code completion accuracy. Its instruction style encourages structured \"thought → answer\" traces that can be parsed or hidden according to user preference. That transparency pairs well with audit‑focused industries like finance or healthcare where seeing the reasoning path matters. In Arcee Conductor, Maestro is automatically selected for complex, multi‑constraint queries that smaller SLMs bounce. ", + "description": "Maestro Reasoning is Arcee's flagship analysis model: a 32 B‑parameter derivative of Qwen 2.5‑32 B tuned with DPO and chain‑of‑thought RL for step‑by‑step logic.", "context_length": 131072, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Other", - "instruct_type": null - }, - "pricing": { - "prompt": "0.0000009", - "completion": "0.0000033", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 131072, - "max_completion_tokens": 32000, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "logit_bias", - "max_tokens", - "min_p", - "presence_penalty", - "repetition_penalty", - "stop", - "temperature", - "top_k", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "" }, { "id": "arcee-ai/virtuoso-large", - "canonical_slug": "arcee-ai/virtuoso-large", - "hugging_face_id": "", "name": "Arcee AI: Virtuoso Large", - "created": 1746478885, - "description": "Virtuoso‑Large is Arcee's top‑tier general‑purpose LLM at 72 B parameters, tuned to tackle cross‑domain reasoning, creative writing and enterprise QA. 
Unlike many 70 B peers, it retains the 128 k context inherited from Qwen 2.5, letting it ingest books, codebases or financial filings wholesale. Training blended DeepSeek R1 distillation, multi‑epoch supervised fine‑tuning and a final DPO/RLHF alignment stage, yielding strong performance on BIG‑Bench‑Hard, GSM‑8K and long‑context Needle‑In‑Haystack tests. Enterprises use Virtuoso‑Large as the \"fallback\" brain in Conductor pipelines when other SLMs flag low confidence. Despite its size, aggressive KV‑cache optimizations keep first‑token latency in the low‑second range on 8× H100 nodes, making it a practical production‑grade powerhouse.", + "description": "Virtuoso‑Large is Arcee's top‑tier general‑purpose LLM at 72 B parameters, tuned to tackle cross‑domain reasoning, creative writing and enterprise QA.", "context_length": 131072, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Other", - "instruct_type": null - }, - "pricing": { - "prompt": "0.00000075", - "completion": "0.0000012", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 131072, - "max_completion_tokens": 64000, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "logit_bias", - "max_tokens", - "min_p", - "presence_penalty", - "repetition_penalty", - "stop", - "temperature", - "tool_choice", - "tools", - "top_k", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "" }, { "id": "arcee-ai/coder-large", - "canonical_slug": "arcee-ai/coder-large", - "hugging_face_id": "", "name": "Arcee AI: Coder Large", - "created": 1746478663, - "description": "Coder‑Large is a 32 B‑parameter offspring of Qwen 2.5‑Instruct that has been further trained on permissively‑licensed GitHub, CodeSearchNet and synthetic bug‑fix corpora. It supports a 32k context window, enabling multi‑file refactoring or long diff review in a single call, and understands 30‑plus programming languages with special attention to TypeScript, Go and Terraform. Internal benchmarks show 5–8 pt gains over CodeLlama‑34 B‑Python on HumanEval and competitive BugFix scores thanks to a reinforcement pass that rewards compilable output. The model emits structured explanations alongside code blocks by default, making it suitable for educational tooling as well as production copilot scenarios. Cost‑wise, Together AI prices it well below proprietary incumbents, so teams can scale interactive coding without runaway spend. 
", + "description": "Coder‑Large is a 32 B‑parameter offspring of Qwen 2.5‑Instruct that has been further trained on permissively‑licensed GitHub, CodeSearchNet and synthetic bug‑fix corpora.", "context_length": 32768, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Other", - "instruct_type": null - }, - "pricing": { - "prompt": "0.0000005", - "completion": "0.0000008", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 32768, - "max_completion_tokens": null, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "logit_bias", - "max_tokens", - "min_p", - "presence_penalty", - "repetition_penalty", - "stop", - "temperature", - "top_k", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "" }, { "id": "microsoft/phi-4-reasoning-plus", - "canonical_slug": "microsoft/phi-4-reasoning-plus-04-30", - "hugging_face_id": "microsoft/Phi-4-reasoning-plus", "name": "Microsoft: Phi 4 Reasoning Plus", - "created": 1746130961, - "description": "Phi-4-reasoning-plus is an enhanced 14B parameter model from Microsoft, fine-tuned from Phi-4 with additional reinforcement learning to boost accuracy on math, science, and code reasoning tasks. It uses the same dense decoder-only transformer architecture as Phi-4, but generates longer, more comprehensive outputs structured into a step-by-step reasoning trace and final answer.\n\nWhile it offers improved benchmark scores over Phi-4-reasoning across tasks like AIME, OmniMath, and HumanEvalPlus, its responses are typically ~50% longer, resulting in higher latency. Designed for English-only applications, it is well-suited for structured reasoning workflows where output quality takes priority over response speed.", + "description": "Phi-4-reasoning-plus is an enhanced 14B parameter model from Microsoft, fine-tuned from Phi-4 with additional reinforcement learning to boost accuracy on math, science, and code reasoning tasks.", "context_length": 32768, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Other", - "instruct_type": null - }, - "pricing": { - "prompt": "0.00000007", - "completion": "0.00000035", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 32768, - "max_completion_tokens": null, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "include_reasoning", - "max_tokens", - "min_p", - "presence_penalty", - "reasoning", - "repetition_penalty", - "response_format", - "seed", - "stop", - "temperature", - "top_k", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "microsoft/Phi-4-reasoning-plus" }, { "id": "inception/mercury-coder", - "canonical_slug": "inception/mercury-coder-small-beta", - "hugging_face_id": "", "name": "Inception: Mercury Coder", - "created": 1746033880, - "description": "Mercury Coder is the first diffusion large language model (dLLM). Applying a breakthrough discrete diffusion approach, the model runs 5-10x faster than even speed optimized models like Claude 3.5 Haiku and GPT-4o Mini while matching their performance. Mercury Coder's speed means that developers can stay in the flow while coding, enjoying rapid chat-based iteration and responsive code completion suggestions. 
On Copilot Arena, Mercury Coder ranks 1st in speed and ties for 2nd in quality. Read more in the [blog post here](https://www.inceptionlabs.ai/blog/introducing-mercury).", + "description": "Mercury Coder is the first diffusion large language model (dLLM).", "context_length": 128000, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Other", - "instruct_type": null - }, - "pricing": { - "prompt": "0.00000025", - "completion": "0.000001", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 128000, - "max_completion_tokens": 16384, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "max_tokens", - "presence_penalty", - "response_format", - "stop", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_k", - "top_p" - ], - "default_parameters": { - "temperature": 0, - "top_p": null, - "frequency_penalty": null - } + "hugging_face_id": "" }, { "id": "qwen/qwen3-4b:free", - "canonical_slug": "qwen/qwen3-4b-04-28", - "hugging_face_id": "Qwen/Qwen3-4B", "name": "Qwen: Qwen3 4B (free)", - "created": 1746031104, - "description": "Qwen3-4B is a 4 billion parameter dense language model from the Qwen3 series, designed to support both general-purpose and reasoning-intensive tasks. It introduces a dual-mode architecture—thinking and non-thinking—allowing dynamic switching between high-precision logical reasoning and efficient dialogue generation. This makes it well-suited for multi-turn chat, instruction following, and complex agent workflows.", + "description": "Qwen3-4B is a 4 billion parameter dense language model from the Qwen3 series, designed to support both general-purpose and reasoning-intensive tasks.", "context_length": 40960, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Qwen3", - "instruct_type": "qwen3" - }, - "pricing": { - "prompt": "0", - "completion": "0", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 40960, - "max_completion_tokens": null, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "include_reasoning", - "max_tokens", - "presence_penalty", - "reasoning", - "response_format", - "stop", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_k", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "Qwen/Qwen3-4B" }, { "id": "deepseek/deepseek-prover-v2", - "canonical_slug": "deepseek/deepseek-prover-v2", - "hugging_face_id": "deepseek-ai/DeepSeek-Prover-V2-671B", "name": "DeepSeek: DeepSeek Prover V2", - "created": 1746013094, - "description": "DeepSeek Prover V2 is a 671B parameter model, speculated to be geared towards logic and mathematics. 
Likely an upgrade from [DeepSeek-Prover-V1.5](https://huggingface.co/deepseek-ai/DeepSeek-Prover-V1.5-RL) Not much is known about the model yet, as DeepSeek released it on Hugging Face without an announcement or description.", + "description": "DeepSeek Prover V2 is a 671B parameter model, speculated to be geared towards logic and mathematics.", "context_length": 163840, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "DeepSeek", - "instruct_type": null - }, - "pricing": { - "prompt": "0.0000005", - "completion": "0.00000218", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 163840, - "max_completion_tokens": null, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "max_tokens", - "min_p", - "presence_penalty", - "repetition_penalty", - "response_format", - "seed", - "stop", - "temperature", - "top_k", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "deepseek-ai/DeepSeek-Prover-V2-671B" }, { "id": "meta-llama/llama-guard-4-12b", - "canonical_slug": "meta-llama/llama-guard-4-12b", - "hugging_face_id": "meta-llama/Llama-Guard-4-12B", "name": "Meta: Llama Guard 4 12B", - "created": 1745975193, - "description": "Llama Guard 4 is a Llama 4 Scout-derived multimodal pretrained model, fine-tuned for content safety classification. Similar to previous versions, it can be used to classify content in both LLM inputs (prompt classification) and in LLM responses (response classification). It acts as an LLM—generating text in its output that indicates whether a given prompt or response is safe or unsafe, and if unsafe, it also lists the content categories violated.\n\nLlama Guard 4 was aligned to safeguard against the standardized MLCommons hazards taxonomy and designed to support multimodal Llama 4 capabilities. Specifically, it combines features from previous Llama Guard models, providing content moderation for English and multiple supported languages, along with enhanced capabilities to handle mixed text-and-image prompts, including multiple images. 
Additionally, Llama Guard 4 is integrated into the Llama Moderations API, extending robust safety classification to text and images.", + "description": "Llama Guard 4 is a Llama 4 Scout-derived multimodal pretrained model, fine-tuned for content safety classification.", "context_length": 163840, - "architecture": { - "modality": "text+image-\u003Etext", - "input_modalities": ["image", "text"], - "output_modalities": ["text"], - "tokenizer": "Other", - "instruct_type": null - }, - "pricing": { - "prompt": "0.00000018", - "completion": "0.00000018", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 163840, - "max_completion_tokens": null, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "logit_bias", - "max_tokens", - "min_p", - "presence_penalty", - "repetition_penalty", - "response_format", - "seed", - "stop", - "temperature", - "top_k", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "meta-llama/Llama-Guard-4-12B" }, { "id": "qwen/qwen3-30b-a3b:free", - "canonical_slug": "qwen/qwen3-30b-a3b-04-28", - "hugging_face_id": "Qwen/Qwen3-30B-A3B", "name": "Qwen: Qwen3 30B A3B (free)", - "created": 1745878604, - "description": "Qwen3, the latest generation in the Qwen large language model series, features both dense and mixture-of-experts (MoE) architectures to excel in reasoning, multilingual support, and advanced agent tasks. Its unique ability to switch seamlessly between a thinking mode for complex reasoning and a non-thinking mode for efficient dialogue ensures versatile, high-quality performance.\n\nSignificantly outperforming prior models like QwQ and Qwen2.5, Qwen3 delivers superior mathematics, coding, commonsense reasoning, creative writing, and interactive dialogue capabilities. The Qwen3-30B-A3B variant includes 30.5 billion parameters (3.3 billion activated), 48 layers, 128 experts (8 activated per task), and supports up to 131K token contexts with YaRN, setting a new standard among open-source models.", + "description": "Qwen3, the latest generation in the Qwen large language model series, features both dense and mixture-of-experts (MoE) architectures to excel in reasoning, multilingual support, and advanced agent tasks.", "context_length": 40960, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Qwen3", - "instruct_type": "qwen3" - }, - "pricing": { - "prompt": "0", - "completion": "0", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 40960, - "max_completion_tokens": null, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "include_reasoning", - "max_tokens", - "presence_penalty", - "reasoning", - "repetition_penalty", - "seed", - "stop", - "temperature", - "top_k", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "Qwen/Qwen3-30B-A3B" }, { "id": "qwen/qwen3-30b-a3b", - "canonical_slug": "qwen/qwen3-30b-a3b-04-28", - "hugging_face_id": "Qwen/Qwen3-30B-A3B", "name": "Qwen: Qwen3 30B A3B", - "created": 1745878604, - "description": "Qwen3, the latest generation in the Qwen large language model series, features both dense and mixture-of-experts (MoE) architectures to excel in reasoning, multilingual support, and advanced agent tasks.
Its unique ability to switch seamlessly between a thinking mode for complex reasoning and a non-thinking mode for efficient dialogue ensures versatile, high-quality performance.\n\nSignificantly outperforming prior models like QwQ and Qwen2.5, Qwen3 delivers superior mathematics, coding, commonsense reasoning, creative writing, and interactive dialogue capabilities. The Qwen3-30B-A3B variant includes 30.5 billion parameters (3.3 billion activated), 48 layers, 128 experts (8 activated per task), and supports up to 131K token contexts with YaRN, setting a new standard among open-source models.", + "description": "Qwen3, the latest generation in the Qwen large language model series, features both dense and mixture-of-experts (MoE) architectures to excel in reasoning, multilingual support, and advanced agent tasks.", "context_length": 40960, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Qwen3", - "instruct_type": "qwen3" - }, - "pricing": { - "prompt": "0.00000006", - "completion": "0.00000022", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 40960, - "max_completion_tokens": 40960, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "include_reasoning", - "logit_bias", - "max_tokens", - "min_p", - "presence_penalty", - "reasoning", - "repetition_penalty", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_k", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "Qwen/Qwen3-30B-A3B" }, { "id": "qwen/qwen3-8b", - "canonical_slug": "qwen/qwen3-8b-04-28", - "hugging_face_id": "Qwen/Qwen3-8B", "name": "Qwen: Qwen3 8B", - "created": 1745876632, - "description": "Qwen3-8B is a dense 8.2B parameter causal language model from the Qwen3 series, designed for both reasoning-heavy tasks and efficient dialogue. It supports seamless switching between \"thinking\" mode for math, coding, and logical inference, and \"non-thinking\" mode for general conversation. The model is fine-tuned for instruction-following, agent integration, creative writing, and multilingual use across 100+ languages and dialects.
It natively supports a 32K token context window and can extend to 131K tokens with YaRN scaling.", + "description": "Qwen3-8B is a dense 8.2B parameter causal language model from the Qwen3 series, designed for both reasoning-heavy tasks and efficient dialogue.", "context_length": 128000, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Qwen3", - "instruct_type": "qwen3" - }, - "pricing": { - "prompt": "0.000000035", - "completion": "0.000000138", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 128000, - "max_completion_tokens": 20000, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "include_reasoning", - "logit_bias", - "logprobs", - "max_tokens", - "presence_penalty", - "reasoning", - "repetition_penalty", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_k", - "top_logprobs", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "Qwen/Qwen3-8B" }, { "id": "qwen/qwen3-14b:free", - "canonical_slug": "qwen/qwen3-14b-04-28", - "hugging_face_id": "Qwen/Qwen3-14B", "name": "Qwen: Qwen3 14B (free)", - "created": 1745876478, - "description": "Qwen3-14B is a dense 14.8B parameter causal language model from the Qwen3 series, designed for both complex reasoning and efficient dialogue. It supports seamless switching between a \"thinking\" mode for tasks like math, programming, and logical inference, and a \"non-thinking\" mode for general-purpose conversation. The model is fine-tuned for instruction-following, agent tool use, creative writing, and multilingual tasks across 100+ languages and dialects. It natively handles 32K token contexts and can extend to 131K tokens using YaRN-based scaling.", + "description": "Qwen3-14B is a dense 14.8B parameter causal language model from the Qwen3 series, designed for both complex reasoning and efficient dialogue.", "context_length": 40960, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Qwen3", - "instruct_type": "qwen3" - }, - "pricing": { - "prompt": "0", - "completion": "0", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 40960, - "max_completion_tokens": null, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "include_reasoning", - "max_tokens", - "presence_penalty", - "reasoning", - "repetition_penalty", - "seed", - "stop", - "temperature", - "top_k", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "Qwen/Qwen3-14B" }, { "id": "qwen/qwen3-14b", - "canonical_slug": "qwen/qwen3-14b-04-28", - "hugging_face_id": "Qwen/Qwen3-14B", "name": "Qwen: Qwen3 14B", - "created": 1745876478, - "description": "Qwen3-14B is a dense 14.8B parameter causal language model from the Qwen3 series, designed for both complex reasoning and efficient dialogue. It supports seamless switching between a \"thinking\" mode for tasks like math, programming, and logical inference, and a \"non-thinking\" mode for general-purpose conversation. The model is fine-tuned for instruction-following, agent tool use, creative writing, and multilingual tasks across 100+ languages and dialects. 
It natively handles 32K token contexts and can extend to 131K tokens using YaRN-based scaling.", + "description": "Qwen3-14B is a dense 14.8B parameter causal language model from the Qwen3 series, designed for both complex reasoning and efficient dialogue.", "context_length": 40960, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Qwen3", - "instruct_type": "qwen3" - }, - "pricing": { - "prompt": "0.00000005", - "completion": "0.00000022", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 40960, - "max_completion_tokens": 40960, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "include_reasoning", - "max_tokens", - "min_p", - "presence_penalty", - "reasoning", - "repetition_penalty", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_k", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "Qwen/Qwen3-14B" }, { "id": "qwen/qwen3-32b", - "canonical_slug": "qwen/qwen3-32b-04-28", - "hugging_face_id": "Qwen/Qwen3-32B", "name": "Qwen: Qwen3 32B", - "created": 1745875945, - "description": "Qwen3-32B is a dense 32.8B parameter causal language model from the Qwen3 series, optimized for both complex reasoning and efficient dialogue. It supports seamless switching between a \"thinking\" mode for tasks like math, coding, and logical inference, and a \"non-thinking\" mode for faster, general-purpose conversation. The model demonstrates strong performance in instruction-following, agent tool use, creative writing, and multilingual tasks across 100+ languages and dialects. It natively handles 32K token contexts and can extend to 131K tokens using YaRN-based scaling. ", + "description": "Qwen3-32B is a dense 32.8B parameter causal language model from the Qwen3 series, optimized for both complex reasoning and efficient dialogue.", "context_length": 40960, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Qwen3", - "instruct_type": "qwen3" - }, - "pricing": { - "prompt": "0.00000005", - "completion": "0.0000002", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 40960, - "max_completion_tokens": 40960, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "include_reasoning", - "logprobs", - "max_tokens", - "min_p", - "presence_penalty", - "reasoning", - "repetition_penalty", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_k", - "top_logprobs", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "Qwen/Qwen3-32B" }, { "id": "qwen/qwen3-235b-a22b:free", - "canonical_slug": "qwen/qwen3-235b-a22b-04-28", - "hugging_face_id": "Qwen/Qwen3-235B-A22B", "name": "Qwen: Qwen3 235B A22B (free)", - "created": 1745875757, - "description": "Qwen3-235B-A22B is a 235B parameter mixture-of-experts (MoE) model developed by Qwen, activating 22B parameters per forward pass. It supports seamless switching between a \"thinking\" mode for complex reasoning, math, and code tasks, and a \"non-thinking\" mode for general conversational efficiency. 
The model demonstrates strong reasoning ability, multilingual support (100+ languages and dialects), advanced instruction-following, and agent tool-calling capabilities. It natively handles a 32K token context window and extends up to 131K tokens using YaRN-based scaling.", + "description": "Qwen3-235B-A22B is a 235B parameter mixture-of-experts (MoE) model developed by Qwen, activating 22B parameters per forward pass.", "context_length": 40960, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Qwen3", - "instruct_type": "qwen3" - }, - "pricing": { - "prompt": "0", - "completion": "0", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 40960, - "max_completion_tokens": null, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "include_reasoning", - "max_tokens", - "presence_penalty", - "reasoning", - "repetition_penalty", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_k", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "Qwen/Qwen3-235B-A22B" }, { "id": "qwen/qwen3-235b-a22b", - "canonical_slug": "qwen/qwen3-235b-a22b-04-28", - "hugging_face_id": "Qwen/Qwen3-235B-A22B", "name": "Qwen: Qwen3 235B A22B", - "created": 1745875757, - "description": "Qwen3-235B-A22B is a 235B parameter mixture-of-experts (MoE) model developed by Qwen, activating 22B parameters per forward pass. It supports seamless switching between a \"thinking\" mode for complex reasoning, math, and code tasks, and a \"non-thinking\" mode for general conversational efficiency. The model demonstrates strong reasoning ability, multilingual support (100+ languages and dialects), advanced instruction-following, and agent tool-calling capabilities. It natively handles a 32K token context window and extends up to 131K tokens using YaRN-based scaling.", + "description": "Qwen3-235B-A22B is a 235B parameter mixture-of-experts (MoE) model developed by Qwen, activating 22B parameters per forward pass.", "context_length": 40960, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Qwen3", - "instruct_type": "qwen3" - }, - "pricing": { - "prompt": "0.00000018", - "completion": "0.00000054", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 40960, - "max_completion_tokens": 40960, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "include_reasoning", - "logit_bias", - "logprobs", - "max_tokens", - "min_p", - "presence_penalty", - "reasoning", - "repetition_penalty", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_k", - "top_logprobs", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "Qwen/Qwen3-235B-A22B" }, { "id": "tngtech/deepseek-r1t-chimera:free", - "canonical_slug": "tngtech/deepseek-r1t-chimera", - "hugging_face_id": "tngtech/DeepSeek-R1T-Chimera", "name": "TNG: DeepSeek R1T Chimera (free)", - "created": 1745760875, - "description": "DeepSeek-R1T-Chimera is created by merging DeepSeek-R1 and DeepSeek-V3 (0324), combining the reasoning capabilities of R1 with the token efficiency improvements of V3. 
It is based on a DeepSeek-MoE Transformer architecture and is optimized for general text generation tasks.\n\nThe model merges pretrained weights from both source models to balance performance across reasoning, efficiency, and instruction-following tasks. It is released under the MIT license and intended for research and commercial use.", + "description": "DeepSeek-R1T-Chimera is created by merging DeepSeek-R1 and DeepSeek-V3 (0324), combining the reasoning capabilities of R1 with the token efficiency improvements of V3.", "context_length": 163840, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "DeepSeek", - "instruct_type": null - }, - "pricing": { - "prompt": "0", - "completion": "0", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 163840, - "max_completion_tokens": null, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "include_reasoning", - "max_tokens", - "presence_penalty", - "reasoning", - "repetition_penalty", - "seed", - "stop", - "temperature", - "top_k", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "tngtech/DeepSeek-R1T-Chimera" }, { "id": "tngtech/deepseek-r1t-chimera", - "canonical_slug": "tngtech/deepseek-r1t-chimera", - "hugging_face_id": "tngtech/DeepSeek-R1T-Chimera", "name": "TNG: DeepSeek R1T Chimera", - "created": 1745760875, - "description": "DeepSeek-R1T-Chimera is created by merging DeepSeek-R1 and DeepSeek-V3 (0324), combining the reasoning capabilities of R1 with the token efficiency improvements of V3. It is based on a DeepSeek-MoE Transformer architecture and is optimized for general text generation tasks.\n\nThe model merges pretrained weights from both source models to balance performance across reasoning, efficiency, and instruction-following tasks. It is released under the MIT license and intended for research and commercial use.", + "description": "DeepSeek-R1T-Chimera is created by merging DeepSeek-R1 and DeepSeek-V3 (0324), combining the reasoning capabilities of R1 with the token efficiency improvements of V3.", "context_length": 163840, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "DeepSeek", - "instruct_type": null - }, - "pricing": { - "prompt": "0.0000003", - "completion": "0.0000012", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 163840, - "max_completion_tokens": 163840, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "include_reasoning", - "max_tokens", - "presence_penalty", - "reasoning", - "repetition_penalty", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "top_k", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "tngtech/DeepSeek-R1T-Chimera" }, { "id": "microsoft/mai-ds-r1:free", - "canonical_slug": "microsoft/mai-ds-r1", - "hugging_face_id": "microsoft/MAI-DS-R1", "name": "Microsoft: MAI DS R1 (free)", - "created": 1745194100, - "description": "MAI-DS-R1 is a post-trained variant of DeepSeek-R1 developed by the Microsoft AI team to improve the model’s responsiveness on previously blocked topics while enhancing its safety profile. 
Built on top of DeepSeek-R1’s reasoning foundation, it integrates 110k examples from the Tulu-3 SFT dataset and 350k internally curated multilingual safety-alignment samples. The model retains strong reasoning, coding, and problem-solving capabilities, while unblocking a wide range of prompts previously restricted in R1.\n\nMAI-DS-R1 demonstrates improved performance on harm mitigation benchmarks and maintains competitive results across general reasoning tasks. It surpasses R1-1776 in satisfaction metrics for blocked queries and reduces leakage in harmful content categories. The model is based on a transformer MoE architecture and is suitable for general-purpose use cases, excluding high-stakes domains such as legal, medical, or autonomous systems.", + "description": "MAI-DS-R1 is a post-trained variant of DeepSeek-R1 developed by the Microsoft AI team to improve the model’s responsiveness on previously blocked topics while enhancing its safety profile.", "context_length": 163840, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "DeepSeek", - "instruct_type": "deepseek-r1" - }, - "pricing": { - "prompt": "0", - "completion": "0", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 163840, - "max_completion_tokens": null, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "include_reasoning", - "max_tokens", - "presence_penalty", - "reasoning", - "repetition_penalty", - "seed", - "stop", - "temperature", - "top_k", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "microsoft/MAI-DS-R1" }, { "id": "microsoft/mai-ds-r1", - "canonical_slug": "microsoft/mai-ds-r1", - "hugging_face_id": "microsoft/MAI-DS-R1", "name": "Microsoft: MAI DS R1", - "created": 1745194100, - "description": "MAI-DS-R1 is a post-trained variant of DeepSeek-R1 developed by the Microsoft AI team to improve the model’s responsiveness on previously blocked topics while enhancing its safety profile. Built on top of DeepSeek-R1’s reasoning foundation, it integrates 110k examples from the Tulu-3 SFT dataset and 350k internally curated multilingual safety-alignment samples. The model retains strong reasoning, coding, and problem-solving capabilities, while unblocking a wide range of prompts previously restricted in R1.\n\nMAI-DS-R1 demonstrates improved performance on harm mitigation benchmarks and maintains competitive results across general reasoning tasks. It surpasses R1-1776 in satisfaction metrics for blocked queries and reduces leakage in harmful content categories. 
The model is based on a transformer MoE architecture and is suitable for general-purpose use cases, excluding high-stakes domains such as legal, medical, or autonomous systems.", + "description": "MAI-DS-R1 is a post-trained variant of DeepSeek-R1 developed by the Microsoft AI team to improve the model’s responsiveness on previously blocked topics while enhancing its safety profile.", "context_length": 163840, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "DeepSeek", - "instruct_type": "deepseek-r1" - }, - "pricing": { - "prompt": "0.0000003", - "completion": "0.0000012", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 163840, - "max_completion_tokens": 163840, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "include_reasoning", - "max_tokens", - "presence_penalty", - "reasoning", - "repetition_penalty", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "top_k", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "microsoft/MAI-DS-R1" }, { "id": "openai/o4-mini-high", - "canonical_slug": "openai/o4-mini-high-2025-04-16", - "hugging_face_id": "", "name": "OpenAI: o4 Mini High", - "created": 1744824212, - "description": "OpenAI o4-mini-high is the same model as [o4-mini](/openai/o4-mini) with reasoning_effort set to high. \n\nOpenAI o4-mini is a compact reasoning model in the o-series, optimized for fast, cost-efficient performance while retaining strong multimodal and agentic capabilities. It supports tool use and demonstrates competitive reasoning and coding performance across benchmarks like AIME (99.5% with Python) and SWE-bench, outperforming its predecessor o3-mini and even approaching o3 in some domains.\n\nDespite its smaller size, o4-mini exhibits high accuracy in STEM tasks, visual problem solving (e.g., MathVista, MMMU), and code editing. It is especially well-suited for high-throughput scenarios where latency or cost is critical. Thanks to its efficient architecture and refined reinforcement learning training, o4-mini can chain tools, generate structured outputs, and solve multi-step tasks with minimal delay—often in under a minute.", + "description": "OpenAI o4-mini-high is the same model as [o4-mini](/openai/o4-mini) with reasoning_effort set to high.", "context_length": 200000, - "architecture": { - "modality": "text+image-\u003Etext", - "input_modalities": ["image", "text", "file"], - "output_modalities": ["text"], - "tokenizer": "Other", - "instruct_type": null - }, - "pricing": { - "prompt": "0.0000011", - "completion": "0.0000044", - "request": "0", - "image": "0.0008415", - "web_search": "0.01", - "internal_reasoning": "0", - "input_cache_read": "0.000000275" - }, - "top_provider": { - "context_length": 200000, - "max_completion_tokens": 100000, - "is_moderated": true - }, - "per_request_limits": null, - "supported_parameters": [ - "include_reasoning", - "max_tokens", - "reasoning", - "response_format", - "seed", - "structured_outputs", - "tool_choice", - "tools" - ], - "default_parameters": {} + "hugging_face_id": "" }, { "id": "openai/o3", - "canonical_slug": "openai/o3-2025-04-16", - "hugging_face_id": "", "name": "OpenAI: o3", - "created": 1744823457, - "description": "o3 is a well-rounded and powerful model across domains. 
It sets a new standard for math, science, coding, and visual reasoning tasks. It also excels at technical writing and instruction-following. Use it to think through multi-step problems that involve analysis across text, code, and images. ", + "description": "o3 is a well-rounded and powerful model across domains.", "context_length": 200000, - "architecture": { - "modality": "text+image-\u003Etext", - "input_modalities": ["image", "text", "file"], - "output_modalities": ["text"], - "tokenizer": "GPT", - "instruct_type": null - }, - "pricing": { - "prompt": "0.000002", - "completion": "0.000008", - "request": "0", - "image": "0.00153", - "web_search": "0.01", - "internal_reasoning": "0", - "input_cache_read": "0.0000005" - }, - "top_provider": { - "context_length": 200000, - "max_completion_tokens": 100000, - "is_moderated": true - }, - "per_request_limits": null, - "supported_parameters": [ - "include_reasoning", - "max_tokens", - "reasoning", - "response_format", - "seed", - "structured_outputs", - "tool_choice", - "tools" - ], - "default_parameters": {} + "hugging_face_id": "" }, { "id": "openai/o4-mini", - "canonical_slug": "openai/o4-mini-2025-04-16", - "hugging_face_id": "", "name": "OpenAI: o4 Mini", - "created": 1744820942, - "description": "OpenAI o4-mini is a compact reasoning model in the o-series, optimized for fast, cost-efficient performance while retaining strong multimodal and agentic capabilities. It supports tool use and demonstrates competitive reasoning and coding performance across benchmarks like AIME (99.5% with Python) and SWE-bench, outperforming its predecessor o3-mini and even approaching o3 in some domains.\n\nDespite its smaller size, o4-mini exhibits high accuracy in STEM tasks, visual problem solving (e.g., MathVista, MMMU), and code editing. It is especially well-suited for high-throughput scenarios where latency or cost is critical. Thanks to its efficient architecture and refined reinforcement learning training, o4-mini can chain tools, generate structured outputs, and solve multi-step tasks with minimal delay—often in under a minute.", + "description": "OpenAI o4-mini is a compact reasoning model in the o-series, optimized for fast, cost-efficient performance while retaining strong multimodal and agentic capabilities.", "context_length": 200000, - "architecture": { - "modality": "text+image-\u003Etext", - "input_modalities": ["image", "text", "file"], - "output_modalities": ["text"], - "tokenizer": "GPT", - "instruct_type": null - }, - "pricing": { - "prompt": "0.0000011", - "completion": "0.0000044", - "request": "0", - "image": "0.0008415", - "web_search": "0.01", - "internal_reasoning": "0", - "input_cache_read": "0.000000275" - }, - "top_provider": { - "context_length": 200000, - "max_completion_tokens": 100000, - "is_moderated": true - }, - "per_request_limits": null, - "supported_parameters": [ - "include_reasoning", - "max_tokens", - "reasoning", - "response_format", - "seed", - "structured_outputs", - "tool_choice", - "tools" - ], - "default_parameters": {} + "hugging_face_id": "" }, { "id": "qwen/qwen2.5-coder-7b-instruct", - "canonical_slug": "qwen/qwen2.5-coder-7b-instruct", - "hugging_face_id": "Qwen/Qwen2.5-Coder-7B-Instruct", "name": "Qwen: Qwen2.5 Coder 7B Instruct", - "created": 1744734887, - "description": "Qwen2.5-Coder-7B-Instruct is a 7B parameter instruction-tuned language model optimized for code-related tasks such as code generation, reasoning, and bug fixing. 
Based on the Qwen2.5 architecture, it incorporates enhancements like RoPE, SwiGLU, RMSNorm, and GQA attention with support for up to 128K tokens using YaRN-based extrapolation. It is trained on a large corpus of source code, synthetic data, and text-code grounding, providing robust performance across programming languages and agentic coding workflows.\n\nThis model is part of the Qwen2.5-Coder family and offers strong compatibility with tools like vLLM for efficient deployment. Released under the Apache 2.0 license.", + "description": "Qwen2.5-Coder-7B-Instruct is a 7B parameter instruction-tuned language model optimized for code-related tasks such as code generation, reasoning, and bug fixing.", "context_length": 32768, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Qwen", - "instruct_type": null - }, - "pricing": { - "prompt": "0.00000003", - "completion": "0.00000009", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 32768, - "max_completion_tokens": null, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "max_tokens", - "presence_penalty", - "repetition_penalty", - "response_format", - "structured_outputs", - "temperature", - "top_k", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "Qwen/Qwen2.5-Coder-7B-Instruct" }, { "id": "openai/gpt-4.1", - "canonical_slug": "openai/gpt-4.1-2025-04-14", - "hugging_face_id": "", "name": "OpenAI: GPT-4.1", - "created": 1744651385, - "description": "GPT-4.1 is a flagship large language model optimized for advanced instruction following, real-world software engineering, and long-context reasoning. It supports a 1 million token context window and outperforms GPT-4o and GPT-4.5 across coding (54.6% SWE-bench Verified), instruction compliance (87.4% IFEval), and multimodal understanding benchmarks. It is tuned for precise code diffs, agent reliability, and high recall in large document contexts, making it ideal for agents, IDE tooling, and enterprise knowledge retrieval.", + "description": "GPT-4.1 is a flagship large language model optimized for advanced instruction following, real-world software engineering, and long-context reasoning.", "context_length": 1047576, - "architecture": { - "modality": "text+image-\u003Etext", - "input_modalities": ["image", "text", "file"], - "output_modalities": ["text"], - "tokenizer": "GPT", - "instruct_type": null - }, - "pricing": { - "prompt": "0.000002", - "completion": "0.000008", - "request": "0", - "image": "0", - "web_search": "0.01", - "internal_reasoning": "0", - "input_cache_read": "0.0000005" - }, - "top_provider": { - "context_length": 1047576, - "max_completion_tokens": 32768, - "is_moderated": true - }, - "per_request_limits": null, - "supported_parameters": [ - "max_tokens", - "response_format", - "seed", - "structured_outputs", - "tool_choice", - "tools" - ], - "default_parameters": {} + "hugging_face_id": "" }, { "id": "openai/gpt-4.1-mini", - "canonical_slug": "openai/gpt-4.1-mini-2025-04-14", - "hugging_face_id": "", "name": "OpenAI: GPT-4.1 Mini", - "created": 1744651381, - "description": "GPT-4.1 Mini is a mid-sized model delivering performance competitive with GPT-4o at substantially lower latency and cost. It retains a 1 million token context window and scores 45.1% on hard instruction evals, 35.8% on MultiChallenge, and 84.1% on IFEval. 
Mini also shows strong coding ability (e.g., 31.6% on Aider’s polyglot diff benchmark) and vision understanding, making it suitable for interactive applications with tight performance constraints.", + "description": "GPT-4.1 Mini is a mid-sized model delivering performance competitive with GPT-4o at substantially lower latency and cost.", "context_length": 1047576, - "architecture": { - "modality": "text+image-\u003Etext", - "input_modalities": ["image", "text", "file"], - "output_modalities": ["text"], - "tokenizer": "GPT", - "instruct_type": null - }, - "pricing": { - "prompt": "0.0000004", - "completion": "0.0000016", - "request": "0", - "image": "0", - "web_search": "0.01", - "internal_reasoning": "0", - "input_cache_read": "0.0000001" - }, - "top_provider": { - "context_length": 1047576, - "max_completion_tokens": 32768, - "is_moderated": true - }, - "per_request_limits": null, - "supported_parameters": [ - "max_tokens", - "response_format", - "seed", - "structured_outputs", - "tool_choice", - "tools" - ], - "default_parameters": {} + "hugging_face_id": "" }, { "id": "openai/gpt-4.1-nano", - "canonical_slug": "openai/gpt-4.1-nano-2025-04-14", - "hugging_face_id": "", "name": "OpenAI: GPT-4.1 Nano", - "created": 1744651369, - "description": "For tasks that demand low latency, GPT‑4.1 nano is the fastest and cheapest model in the GPT-4.1 series. It delivers exceptional performance at a small size with its 1 million token context window, and scores 80.1% on MMLU, 50.3% on GPQA, and 9.8% on Aider polyglot coding – even higher than GPT‑4o mini. It’s ideal for tasks like classification or autocompletion.", + "description": "For tasks that demand low latency, GPT‑4.1 nano is the fastest and cheapest model in the GPT-4.1 series.", "context_length": 1047576, - "architecture": { - "modality": "text+image-\u003Etext", - "input_modalities": ["image", "text", "file"], - "output_modalities": ["text"], - "tokenizer": "GPT", - "instruct_type": null - }, - "pricing": { - "prompt": "0.0000001", - "completion": "0.0000004", - "request": "0", - "image": "0", - "web_search": "0.01", - "internal_reasoning": "0", - "input_cache_read": "0.000000025" - }, - "top_provider": { - "context_length": 1047576, - "max_completion_tokens": 32768, - "is_moderated": true - }, - "per_request_limits": null, - "supported_parameters": [ - "max_tokens", - "response_format", - "seed", - "structured_outputs", - "tool_choice", - "tools" - ], - "default_parameters": {} + "hugging_face_id": "" }, { "id": "eleutherai/llemma_7b", - "canonical_slug": "eleutherai/llemma_7b", - "hugging_face_id": "EleutherAI/llemma_7b", "name": "EleutherAI: Llemma 7b", - "created": 1744643225, - "description": "Llemma 7B is a language model for mathematics. It was initialized with Code Llama 7B weights, and trained on the Proof-Pile-2 for 200B tokens. 
Llemma models are particularly strong at chain-of-thought mathematical reasoning and using computational tools for mathematics, such as Python and formal theorem provers.", + "description": "Llemma 7B is a language model for mathematics.", "context_length": 4096, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Other", - "instruct_type": "code-llama" - }, - "pricing": { - "prompt": "0.0000008", - "completion": "0.0000012", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 4096, - "max_completion_tokens": 4096, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "max_tokens", - "min_p", - "presence_penalty", - "repetition_penalty", - "seed", - "stop", - "temperature", - "top_k", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "EleutherAI/llemma_7b" }, { "id": "alfredpros/codellama-7b-instruct-solidity", - "canonical_slug": "alfredpros/codellama-7b-instruct-solidity", - "hugging_face_id": "AlfredPros/CodeLlama-7b-Instruct-Solidity", "name": "AlfredPros: CodeLLaMa 7B Instruct Solidity", - "created": 1744641874, "description": "A finetuned 7 billion parameters Code LLaMA - Instruct model to generate Solidity smart contract using 4-bit QLoRA finetuning provided by PEFT library.", "context_length": 4096, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Other", - "instruct_type": "alpaca" - }, - "pricing": { - "prompt": "0.0000008", - "completion": "0.0000012", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 4096, - "max_completion_tokens": 4096, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "max_tokens", - "min_p", - "presence_penalty", - "repetition_penalty", - "seed", - "stop", - "temperature", - "top_k", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "AlfredPros/CodeLlama-7b-Instruct-Solidity" }, { "id": "arliai/qwq-32b-arliai-rpr-v1:free", - "canonical_slug": "arliai/qwq-32b-arliai-rpr-v1", - "hugging_face_id": "ArliAI/QwQ-32B-ArliAI-RpR-v1", "name": "ArliAI: QwQ 32B RpR v1 (free)", - "created": 1744555982, - "description": "QwQ-32B-ArliAI-RpR-v1 is a 32B parameter model fine-tuned from Qwen/QwQ-32B using a curated creative writing and roleplay dataset originally developed for the RPMax series. It is designed to maintain coherence and reasoning across long multi-turn conversations by introducing explicit reasoning steps per dialogue turn, generated and refined using the base model itself.\n\nThe model was trained using RS-QLORA+ on 8K sequence lengths and supports up to 128K context windows (with practical performance around 32K). 
It is optimized for creative roleplay and dialogue generation, with an emphasis on minimizing cross-context repetition while preserving stylistic diversity.", + "description": "QwQ-32B-ArliAI-RpR-v1 is a 32B parameter model fine-tuned from Qwen/QwQ-32B using a curated creative writing and roleplay dataset originally developed for the RPMax series.", "context_length": 32768, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Other", - "instruct_type": "deepseek-r1" - }, - "pricing": { - "prompt": "0", - "completion": "0", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 32768, - "max_completion_tokens": null, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "include_reasoning", - "max_tokens", - "presence_penalty", - "reasoning", - "repetition_penalty", - "seed", - "stop", - "temperature", - "top_k", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "ArliAI/QwQ-32B-ArliAI-RpR-v1" }, { "id": "arliai/qwq-32b-arliai-rpr-v1", - "canonical_slug": "arliai/qwq-32b-arliai-rpr-v1", - "hugging_face_id": "ArliAI/QwQ-32B-ArliAI-RpR-v1", "name": "ArliAI: QwQ 32B RpR v1", - "created": 1744555982, - "description": "QwQ-32B-ArliAI-RpR-v1 is a 32B parameter model fine-tuned from Qwen/QwQ-32B using a curated creative writing and roleplay dataset originally developed for the RPMax series. It is designed to maintain coherence and reasoning across long multi-turn conversations by introducing explicit reasoning steps per dialogue turn, generated and refined using the base model itself.\n\nThe model was trained using RS-QLORA+ on 8K sequence lengths and supports up to 128K context windows (with practical performance around 32K). It is optimized for creative roleplay and dialogue generation, with an emphasis on minimizing cross-context repetition while preserving stylistic diversity.", + "description": "QwQ-32B-ArliAI-RpR-v1 is a 32B parameter model fine-tuned from Qwen/QwQ-32B using a curated creative writing and roleplay dataset originally developed for the RPMax series.", "context_length": 32768, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Other", - "instruct_type": "deepseek-r1" - }, - "pricing": { - "prompt": "0.00000003", - "completion": "0.00000011", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 32768, - "max_completion_tokens": 32768, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "include_reasoning", - "max_tokens", - "presence_penalty", - "reasoning", - "repetition_penalty", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "top_k", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "ArliAI/QwQ-32B-ArliAI-RpR-v1" }, { "id": "agentica-org/deepcoder-14b-preview:free", - "canonical_slug": "agentica-org/deepcoder-14b-preview", - "hugging_face_id": "agentica-org/DeepCoder-14B-Preview", "name": "Agentica: Deepcoder 14B Preview (free)", - "created": 1744555395, - "description": "DeepCoder-14B-Preview is a 14B parameter code generation model fine-tuned from DeepSeek-R1-Distill-Qwen-14B using reinforcement learning with GRPO+ and iterative context lengthening. 
It is optimized for long-context program synthesis and achieves strong performance across coding benchmarks, including 60.6% on LiveCodeBench v5, competitive with models like o3-Mini", + "description": "DeepCoder-14B-Preview is a 14B parameter code generation model fine-tuned from DeepSeek-R1-Distill-Qwen-14B using reinforcement learning with GRPO+ and iterative context lengthening.", "context_length": 96000, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Other", - "instruct_type": "deepseek-r1" - }, - "pricing": { - "prompt": "0", - "completion": "0", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 96000, - "max_completion_tokens": null, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "include_reasoning", - "max_tokens", - "presence_penalty", - "reasoning", - "repetition_penalty", - "seed", - "stop", - "temperature", - "top_k", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "agentica-org/DeepCoder-14B-Preview" }, { "id": "agentica-org/deepcoder-14b-preview", - "canonical_slug": "agentica-org/deepcoder-14b-preview", - "hugging_face_id": "agentica-org/DeepCoder-14B-Preview", "name": "Agentica: Deepcoder 14B Preview", - "created": 1744555395, - "description": "DeepCoder-14B-Preview is a 14B parameter code generation model fine-tuned from DeepSeek-R1-Distill-Qwen-14B using reinforcement learning with GRPO+ and iterative context lengthening. It is optimized for long-context program synthesis and achieves strong performance across coding benchmarks, including 60.6% on LiveCodeBench v5, competitive with models like o3-Mini", + "description": "DeepCoder-14B-Preview is a 14B parameter code generation model fine-tuned from DeepSeek-R1-Distill-Qwen-14B using reinforcement learning with GRPO+ and iterative context lengthening.", "context_length": 96000, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Other", - "instruct_type": "deepseek-r1" - }, - "pricing": { - "prompt": "0.000000015", - "completion": "0.000000015", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 96000, - "max_completion_tokens": null, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "include_reasoning", - "max_tokens", - "presence_penalty", - "reasoning", - "repetition_penalty", - "seed", - "stop", - "temperature", - "top_k", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "agentica-org/DeepCoder-14B-Preview" }, { "id": "x-ai/grok-3-mini-beta", - "canonical_slug": "x-ai/grok-3-mini-beta", - "hugging_face_id": "", "name": "xAI: Grok 3 Mini Beta", - "created": 1744240195, - "description": "Grok 3 Mini is a lightweight, smaller thinking model. Unlike traditional models that generate answers immediately, Grok 3 Mini thinks before responding. It’s ideal for reasoning-heavy tasks that don’t demand extensive domain knowledge, and shines in math-specific and quantitative use cases, such as solving challenging puzzles or math problems.\n\nTransparent \"thinking\" traces accessible. Defaults to low reasoning, can boost with setting `reasoning: { effort: \"high\" }`\n\nNote: That there are two xAI endpoints for this model. 
By default when using this model we will always route you to the base endpoint. If you want the fast endpoint you can add `provider: { sort: throughput}`, to sort by throughput instead. \n", + "description": "Grok 3 Mini is a lightweight, smaller thinking model.", "context_length": 131072, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Grok", - "instruct_type": null - }, - "pricing": { - "prompt": "0.0000003", - "completion": "0.0000005", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0", - "input_cache_read": "0.000000075" - }, - "top_provider": { - "context_length": 131072, - "max_completion_tokens": null, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "include_reasoning", - "logprobs", - "max_tokens", - "reasoning", - "response_format", - "seed", - "stop", - "temperature", - "tool_choice", - "tools", - "top_logprobs", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "" }, { "id": "x-ai/grok-3-beta", - "canonical_slug": "x-ai/grok-3-beta", - "hugging_face_id": "", "name": "xAI: Grok 3 Beta", - "created": 1744240068, - "description": "Grok 3 is the latest model from xAI. It's their flagship model that excels at enterprise use cases like data extraction, coding, and text summarization. Possesses deep domain knowledge in finance, healthcare, law, and science.\n\nExcels in structured tasks and benchmarks like GPQA, LCB, and MMLU-Pro where it outperforms Grok 3 Mini even on high thinking. \n\nNote: That there are two xAI endpoints for this model. By default when using this model we will always route you to the base endpoint. If you want the fast endpoint you can add `provider: { sort: throughput}`, to sort by throughput instead. \n", + "description": "Grok 3 is the latest model from xAI.", "context_length": 131072, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Grok", - "instruct_type": null - }, - "pricing": { - "prompt": "0.000003", - "completion": "0.000015", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0", - "input_cache_read": "0.00000075" - }, - "top_provider": { - "context_length": 131072, - "max_completion_tokens": null, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "logprobs", - "max_tokens", - "presence_penalty", - "response_format", - "seed", - "stop", - "temperature", - "tool_choice", - "tools", - "top_logprobs", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "" }, { "id": "nvidia/llama-3.1-nemotron-ultra-253b-v1", - "canonical_slug": "nvidia/llama-3.1-nemotron-ultra-253b-v1", - "hugging_face_id": "nvidia/Llama-3_1-Nemotron-Ultra-253B-v1", "name": "NVIDIA: Llama 3.1 Nemotron Ultra 253B v1", - "created": 1744115059, - "description": "Llama-3.1-Nemotron-Ultra-253B-v1 is a large language model (LLM) optimized for advanced reasoning, human-interactive chat, retrieval-augmented generation (RAG), and tool-calling tasks. Derived from Meta’s Llama-3.1-405B-Instruct, it has been significantly customized using Neural Architecture Search (NAS), resulting in enhanced efficiency, reduced memory usage, and improved inference latency. 
The model supports a context length of up to 128K tokens and can operate efficiently on an 8x NVIDIA H100 node.\n\nNote: you must include `detailed thinking on` in the system prompt to enable reasoning. Please see [Usage Recommendations](https://huggingface.co/nvidia/Llama-3_1-Nemotron-Ultra-253B-v1#quick-start-and-usage-recommendations) for more.", + "description": "Llama-3.1-Nemotron-Ultra-253B-v1 is a large language model (LLM) optimized for advanced reasoning, human-interactive chat, retrieval-augmented generation (RAG), and tool-calling tasks.", "context_length": 131072, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Llama3", - "instruct_type": null - }, - "pricing": { - "prompt": "0.0000006", - "completion": "0.0000018", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 131072, - "max_completion_tokens": null, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "include_reasoning", - "max_tokens", - "presence_penalty", - "reasoning", - "repetition_penalty", - "response_format", - "structured_outputs", - "temperature", - "top_k", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "nvidia/Llama-3_1-Nemotron-Ultra-253B-v1" }, { "id": "meta-llama/llama-4-maverick:free", - "canonical_slug": "meta-llama/llama-4-maverick-17b-128e-instruct", - "hugging_face_id": "meta-llama/Llama-4-Maverick-17B-128E-Instruct", "name": "Meta: Llama 4 Maverick (free)", - "created": 1743881822, - "description": "Llama 4 Maverick 17B Instruct (128E) is a high-capacity multimodal language model from Meta, built on a mixture-of-experts (MoE) architecture with 128 experts and 17 billion active parameters per forward pass (400B total). It supports multilingual text and image input, and produces multilingual text and code output across 12 supported languages. Optimized for vision-language tasks, Maverick is instruction-tuned for assistant-like behavior, image reasoning, and general-purpose multimodal interaction.\n\nMaverick features early fusion for native multimodality and a 1 million token context window. It was trained on a curated mixture of public, licensed, and Meta-platform data, covering ~22 trillion tokens, with a knowledge cutoff in August 2024. 
Released on April 5, 2025 under the Llama 4 Community License, Maverick is suited for research and commercial applications requiring advanced multimodal understanding and high model throughput.", + "description": "Llama 4 Maverick 17B Instruct (128E) is a high-capacity multimodal language model from Meta, built on a mixture-of-experts (MoE) architecture with 128 experts and 17 billion active parameters per forward pass (400B total).", "context_length": 128000, - "architecture": { - "modality": "text+image-\u003Etext", - "input_modalities": ["text", "image"], - "output_modalities": ["text"], - "tokenizer": "Llama4", - "instruct_type": null - }, - "pricing": { - "prompt": "0", - "completion": "0", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 128000, - "max_completion_tokens": 4028, - "is_moderated": true - }, - "per_request_limits": null, - "supported_parameters": [ - "max_tokens", - "repetition_penalty", - "response_format", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_k", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "meta-llama/Llama-4-Maverick-17B-128E-Instruct" }, { "id": "meta-llama/llama-4-maverick", - "canonical_slug": "meta-llama/llama-4-maverick-17b-128e-instruct", - "hugging_face_id": "meta-llama/Llama-4-Maverick-17B-128E-Instruct", "name": "Meta: Llama 4 Maverick", - "created": 1743881822, - "description": "Llama 4 Maverick 17B Instruct (128E) is a high-capacity multimodal language model from Meta, built on a mixture-of-experts (MoE) architecture with 128 experts and 17 billion active parameters per forward pass (400B total). It supports multilingual text and image input, and produces multilingual text and code output across 12 supported languages. Optimized for vision-language tasks, Maverick is instruction-tuned for assistant-like behavior, image reasoning, and general-purpose multimodal interaction.\n\nMaverick features early fusion for native multimodality and a 1 million token context window. It was trained on a curated mixture of public, licensed, and Meta-platform data, covering ~22 trillion tokens, with a knowledge cutoff in August 2024.
Released on April 5, 2025 under the Llama 4 Community License, Maverick is suited for research and commercial applications requiring advanced multimodal understanding and high model throughput.", + "description": "Llama 4 Maverick 17B Instruct (128E) is a high-capacity multimodal language model from Meta, built on a mixture-of-experts (MoE) architecture with 128 experts and 17 billion active parameters per forward pass (400B total).", "context_length": 1048576, - "architecture": { - "modality": "text+image-\u003Etext", - "input_modalities": ["text", "image"], - "output_modalities": ["text"], - "tokenizer": "Llama4", - "instruct_type": null - }, - "pricing": { - "prompt": "0.00000015", - "completion": "0.0000006", - "request": "0", - "image": "0.0006684", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 1048576, - "max_completion_tokens": 16384, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "logit_bias", - "max_tokens", - "min_p", - "presence_penalty", - "repetition_penalty", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_k", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "meta-llama/Llama-4-Maverick-17B-128E-Instruct" }, { "id": "meta-llama/llama-4-scout:free", - "canonical_slug": "meta-llama/llama-4-scout-17b-16e-instruct", - "hugging_face_id": "meta-llama/Llama-4-Scout-17B-16E-Instruct", "name": "Meta: Llama 4 Scout (free)", - "created": 1743881519, - "description": "Llama 4 Scout 17B Instruct (16E) is a mixture-of-experts (MoE) language model developed by Meta, activating 17 billion parameters out of a total of 109B. It supports native multimodal input (text and image) and multilingual output (text and code) across 12 supported languages. Designed for assistant-style interaction and visual reasoning, Scout uses 16 experts per forward pass and features a context length of 10 million tokens, with a training corpus of ~40 trillion tokens.\n\nBuilt for high efficiency and local or commercial deployment, Llama 4 Scout incorporates early fusion for seamless modality integration. It is instruction-tuned for use in multilingual chat, captioning, and image understanding tasks.
Released under the Llama 4 Community License, it was last trained on data up to August 2024 and launched publicly on April 5, 2025.", + "description": "Llama 4 Scout 17B Instruct (16E) is a mixture-of-experts (MoE) language model developed by Meta, activating 17 billion parameters out of a total of 109B.", "context_length": 128000, - "architecture": { - "modality": "text+image-\u003Etext", - "input_modalities": ["text", "image"], - "output_modalities": ["text"], - "tokenizer": "Llama4", - "instruct_type": null - }, - "pricing": { - "prompt": "0", - "completion": "0", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 128000, - "max_completion_tokens": 4028, - "is_moderated": true - }, - "per_request_limits": null, - "supported_parameters": [ - "max_tokens", - "repetition_penalty", - "response_format", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_k", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "meta-llama/Llama-4-Scout-17B-16E-Instruct" }, { "id": "meta-llama/llama-4-scout", - "canonical_slug": "meta-llama/llama-4-scout-17b-16e-instruct", - "hugging_face_id": "meta-llama/Llama-4-Scout-17B-16E-Instruct", "name": "Meta: Llama 4 Scout", - "created": 1743881519, - "description": "Llama 4 Scout 17B Instruct (16E) is a mixture-of-experts (MoE) language model developed by Meta, activating 17 billion parameters out of a total of 109B. It supports native multimodal input (text and image) and multilingual output (text and code) across 12 supported languages. Designed for assistant-style interaction and visual reasoning, Scout uses 16 experts per forward pass and features a context length of 10 million tokens, with a training corpus of ~40 trillion tokens.\n\nBuilt for high efficiency and local or commercial deployment, Llama 4 Scout incorporates early fusion for seamless modality integration. It is instruction-tuned for use in multilingual chat, captioning, and image understanding tasks. 
Released under the Llama 4 Community License, it was last trained on data up to August 2024 and launched publicly on April 5, 2025.", + "description": "Llama 4 Scout 17B Instruct (16E) is a mixture-of-experts (MoE) language model developed by Meta, activating 17 billion parameters out of a total of 109B.", "context_length": 327680, - "architecture": { - "modality": "text+image-\u003Etext", - "input_modalities": ["text", "image"], - "output_modalities": ["text"], - "tokenizer": "Llama4", - "instruct_type": null - }, - "pricing": { - "prompt": "0.00000008", - "completion": "0.0000003", - "request": "0", - "image": "0.0003342", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 327680, - "max_completion_tokens": 16384, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "logit_bias", - "max_tokens", - "min_p", - "presence_penalty", - "repetition_penalty", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_k", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "meta-llama/Llama-4-Scout-17B-16E-Instruct" }, { "id": "qwen/qwen2.5-vl-32b-instruct:free", - "canonical_slug": "qwen/qwen2.5-vl-32b-instruct", - "hugging_face_id": "Qwen/Qwen2.5-VL-32B-Instruct", "name": "Qwen: Qwen2.5 VL 32B Instruct (free)", - "created": 1742839838, - "description": "Qwen2.5-VL-32B is a multimodal vision-language model fine-tuned through reinforcement learning for enhanced mathematical reasoning, structured outputs, and visual problem-solving capabilities. It excels at visual analysis tasks, including object recognition, textual interpretation within images, and precise event localization in extended videos. Qwen2.5-VL-32B demonstrates state-of-the-art performance across multimodal benchmarks such as MMMU, MathVista, and VideoMME, while maintaining strong reasoning and clarity in text-based tasks like MMLU, mathematical problem-solving, and code generation.", + "description": "Qwen2.5-VL-32B is a multimodal vision-language model fine-tuned through reinforcement learning for enhanced mathematical reasoning, structured outputs, and visual problem-solving capabilities.", "context_length": 16384, - "architecture": { - "modality": "text+image-\u003Etext", - "input_modalities": ["text", "image"], - "output_modalities": ["text"], - "tokenizer": "Qwen", - "instruct_type": null - }, - "pricing": { - "prompt": "0", - "completion": "0", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 16384, - "max_completion_tokens": null, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "max_tokens", - "presence_penalty", - "repetition_penalty", - "seed", - "stop", - "temperature", - "top_k", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "Qwen/Qwen2.5-VL-32B-Instruct" }, { "id": "qwen/qwen2.5-vl-32b-instruct", - "canonical_slug": "qwen/qwen2.5-vl-32b-instruct", - "hugging_face_id": "Qwen/Qwen2.5-VL-32B-Instruct", "name": "Qwen: Qwen2.5 VL 32B Instruct", - "created": 1742839838, - "description": "Qwen2.5-VL-32B is a multimodal vision-language model fine-tuned through reinforcement learning for enhanced mathematical reasoning, structured outputs, and visual problem-solving capabilities. 
It excels at visual analysis tasks, including object recognition, textual interpretation within images, and precise event localization in extended videos. Qwen2.5-VL-32B demonstrates state-of-the-art performance across multimodal benchmarks such as MMMU, MathVista, and VideoMME, while maintaining strong reasoning and clarity in text-based tasks like MMLU, mathematical problem-solving, and code generation.", + "description": "Qwen2.5-VL-32B is a multimodal vision-language model fine-tuned through reinforcement learning for enhanced mathematical reasoning, structured outputs, and visual problem-solving capabilities.", "context_length": 16384, - "architecture": { - "modality": "text+image-\u003Etext", - "input_modalities": ["text", "image"], - "output_modalities": ["text"], - "tokenizer": "Qwen", - "instruct_type": null - }, - "pricing": { - "prompt": "0.00000005", - "completion": "0.00000022", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 16384, - "max_completion_tokens": 16384, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "logit_bias", - "logprobs", - "max_tokens", - "min_p", - "presence_penalty", - "repetition_penalty", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "top_k", - "top_logprobs", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "Qwen/Qwen2.5-VL-32B-Instruct" }, { "id": "deepseek/deepseek-chat-v3-0324:free", - "canonical_slug": "deepseek/deepseek-chat-v3-0324", - "hugging_face_id": "deepseek-ai/DeepSeek-V3-0324", "name": "DeepSeek: DeepSeek V3 0324 (free)", - "created": 1742824755, - "description": "DeepSeek V3, a 685B-parameter, mixture-of-experts model, is the latest iteration of the flagship chat model family from the DeepSeek team.\n\nIt succeeds the [DeepSeek V3](/deepseek/deepseek-chat-v3) model and performs really well on a variety of tasks.", + "description": "DeepSeek V3, a 685B-parameter, mixture-of-experts model, is the latest iteration of the flagship chat model family from the DeepSeek team.", "context_length": 163840, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "DeepSeek", - "instruct_type": null - }, - "pricing": { - "prompt": "0", - "completion": "0", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 163840, - "max_completion_tokens": null, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "max_tokens", - "presence_penalty", - "repetition_penalty", - "seed", - "stop", - "temperature", - "tool_choice", - "tools", - "top_k", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "deepseek-ai/DeepSeek-V3-0324" }, { "id": "deepseek/deepseek-chat-v3-0324", - "canonical_slug": "deepseek/deepseek-chat-v3-0324", - "hugging_face_id": "deepseek-ai/DeepSeek-V3-0324", "name": "DeepSeek: DeepSeek V3 0324", - "created": 1742824755, - "description": "DeepSeek V3, a 685B-parameter, mixture-of-experts model, is the latest iteration of the flagship chat model family from the DeepSeek team.\n\nIt succeeds the [DeepSeek V3](/deepseek/deepseek-chat-v3) model and performs really well on a variety of tasks.", + "description": "DeepSeek V3, a 685B-parameter, mixture-of-experts model, is the latest iteration of the flagship chat model family from the 
DeepSeek team.", "context_length": 163840, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "DeepSeek", - "instruct_type": null - }, - "pricing": { - "prompt": "0.00000024", - "completion": "0.00000084", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 163840, - "max_completion_tokens": 163840, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "logit_bias", - "logprobs", - "max_tokens", - "min_p", - "presence_penalty", - "reasoning", - "repetition_penalty", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_k", - "top_logprobs", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "deepseek-ai/DeepSeek-V3-0324" }, { "id": "openai/o1-pro", - "canonical_slug": "openai/o1-pro", - "hugging_face_id": "", "name": "OpenAI: o1-pro", - "created": 1742423211, - "description": "The o1 series of models are trained with reinforcement learning to think before they answer and perform complex reasoning. The o1-pro model uses more compute to think harder and provide consistently better answers.", + "description": "The o1 series of models are trained with reinforcement learning to think before they answer and perform complex reasoning.", "context_length": 200000, - "architecture": { - "modality": "text+image-\u003Etext", - "input_modalities": ["text", "image", "file"], - "output_modalities": ["text"], - "tokenizer": "GPT", - "instruct_type": null - }, - "pricing": { - "prompt": "0.00015", - "completion": "0.0006", - "request": "0", - "image": "0.21675", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 200000, - "max_completion_tokens": 100000, - "is_moderated": true - }, - "per_request_limits": null, - "supported_parameters": [ - "include_reasoning", - "max_tokens", - "reasoning", - "response_format", - "seed", - "structured_outputs" - ], - "default_parameters": {} + "hugging_face_id": "" }, { "id": "mistralai/mistral-small-3.1-24b-instruct:free", - "canonical_slug": "mistralai/mistral-small-3.1-24b-instruct-2503", - "hugging_face_id": "mistralai/Mistral-Small-3.1-24B-Instruct-2503", "name": "Mistral: Mistral Small 3.1 24B (free)", - "created": 1742238937, - "description": "Mistral Small 3.1 24B Instruct is an upgraded variant of Mistral Small 3 (2501), featuring 24 billion parameters with advanced multimodal capabilities. It provides state-of-the-art performance in text-based reasoning and vision tasks, including image analysis, programming, mathematical reasoning, and multilingual support across dozens of languages. Equipped with an extensive 128k token context window and optimized for efficient local inference, it supports use cases such as conversational agents, function calling, long-document comprehension, and privacy-sensitive deployments. 
The updated version is [Mistral Small 3.2](mistralai/mistral-small-3.2-24b-instruct)", + "description": "Mistral Small 3.1 24B Instruct is an upgraded variant of Mistral Small 3 (2501), featuring 24 billion parameters with advanced multimodal capabilities.", "context_length": 96000, - "architecture": { - "modality": "text+image-\u003Etext", - "input_modalities": ["text", "image"], - "output_modalities": ["text"], - "tokenizer": "Mistral", - "instruct_type": null - }, - "pricing": { - "prompt": "0", - "completion": "0", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 96000, - "max_completion_tokens": 96000, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "max_tokens", - "presence_penalty", - "repetition_penalty", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_k", - "top_p" - ], - "default_parameters": { - "temperature": 0.3 - } + "hugging_face_id": "mistralai/Mistral-Small-3.1-24B-Instruct-2503" }, { "id": "mistralai/mistral-small-3.1-24b-instruct", - "canonical_slug": "mistralai/mistral-small-3.1-24b-instruct-2503", - "hugging_face_id": "mistralai/Mistral-Small-3.1-24B-Instruct-2503", "name": "Mistral: Mistral Small 3.1 24B", - "created": 1742238937, - "description": "Mistral Small 3.1 24B Instruct is an upgraded variant of Mistral Small 3 (2501), featuring 24 billion parameters with advanced multimodal capabilities. It provides state-of-the-art performance in text-based reasoning and vision tasks, including image analysis, programming, mathematical reasoning, and multilingual support across dozens of languages. Equipped with an extensive 128k token context window and optimized for efficient local inference, it supports use cases such as conversational agents, function calling, long-document comprehension, and privacy-sensitive deployments. The updated version is [Mistral Small 3.2](mistralai/mistral-small-3.2-24b-instruct)", + "description": "Mistral Small 3.1 24B Instruct is an upgraded variant of Mistral Small 3 (2501), featuring 24 billion parameters with advanced multimodal capabilities.", "context_length": 131072, - "architecture": { - "modality": "text+image-\u003Etext", - "input_modalities": ["text", "image"], - "output_modalities": ["text"], - "tokenizer": "Mistral", - "instruct_type": null - }, - "pricing": { - "prompt": "0.00000005", - "completion": "0.00000022", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 131072, - "max_completion_tokens": 131072, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "max_tokens", - "presence_penalty", - "repetition_penalty", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_k", - "top_p" - ], - "default_parameters": { - "temperature": 0.3 - } + "hugging_face_id": "mistralai/Mistral-Small-3.1-24B-Instruct-2503" }, { "id": "allenai/olmo-2-0325-32b-instruct", - "canonical_slug": "allenai/olmo-2-0325-32b-instruct", - "hugging_face_id": "allenai/OLMo-2-0325-32B-Instruct", "name": "AllenAI: Olmo 2 32B Instruct", - "created": 1741988556, - "description": "OLMo-2 32B Instruct is a supervised instruction-finetuned variant of the OLMo-2 32B March 2025 base model. 
It excels in complex reasoning and instruction-following tasks across diverse benchmarks such as GSM8K, MATH, IFEval, and general NLP evaluation. Developed by AI2, OLMo-2 32B is part of an open, research-oriented initiative, trained primarily on English-language datasets to advance the understanding and development of open-source language models.", + "description": "OLMo-2 32B Instruct is a supervised instruction-finetuned variant of the OLMo-2 32B March 2025 base model.", "context_length": 4096, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Other", - "instruct_type": null - }, - "pricing": { - "prompt": "0.0000002", - "completion": "0.00000035", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 4096, - "max_completion_tokens": 4096, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "logit_bias", - "max_tokens", - "min_p", - "presence_penalty", - "repetition_penalty", - "seed", - "stop", - "temperature", - "top_k", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "allenai/OLMo-2-0325-32B-Instruct" }, { "id": "google/gemma-3-4b-it:free", - "canonical_slug": "google/gemma-3-4b-it", - "hugging_face_id": "google/gemma-3-4b-it", "name": "Google: Gemma 3 4B (free)", - "created": 1741905510, - "description": "Gemma 3 introduces multimodality, supporting vision-language input and text outputs. It handles context windows up to 128k tokens, understands over 140 languages, and offers improved math, reasoning, and chat capabilities, including structured outputs and function calling.", + "description": "Gemma 3 introduces multimodality, supporting vision-language input and text outputs.", "context_length": 32768, - "architecture": { - "modality": "text+image-\u003Etext", - "input_modalities": ["text", "image"], - "output_modalities": ["text"], - "tokenizer": "Gemini", - "instruct_type": "gemma" - }, - "pricing": { - "prompt": "0", - "completion": "0", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 32768, - "max_completion_tokens": 8192, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "max_tokens", - "response_format", - "seed", - "structured_outputs", - "temperature", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "google/gemma-3-4b-it" }, { "id": "google/gemma-3-4b-it", - "canonical_slug": "google/gemma-3-4b-it", - "hugging_face_id": "google/gemma-3-4b-it", "name": "Google: Gemma 3 4B", - "created": 1741905510, - "description": "Gemma 3 introduces multimodality, supporting vision-language input and text outputs. 
It handles context windows up to 128k tokens, understands over 140 languages, and offers improved math, reasoning, and chat capabilities, including structured outputs and function calling.", + "description": "Gemma 3 introduces multimodality, supporting vision-language input and text outputs.", "context_length": 96000, - "architecture": { - "modality": "text+image-\u003Etext", - "input_modalities": ["text", "image"], - "output_modalities": ["text"], - "tokenizer": "Gemini", - "instruct_type": "gemma" - }, - "pricing": { - "prompt": "0.00000001703012", - "completion": "0.0000000681536", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 96000, - "max_completion_tokens": null, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "max_tokens", - "min_p", - "presence_penalty", - "repetition_penalty", - "response_format", - "seed", - "stop", - "temperature", - "top_k", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "google/gemma-3-4b-it" }, { "id": "google/gemma-3-12b-it:free", - "canonical_slug": "google/gemma-3-12b-it", - "hugging_face_id": "google/gemma-3-12b-it", "name": "Google: Gemma 3 12B (free)", - "created": 1741902625, - "description": "Gemma 3 introduces multimodality, supporting vision-language input and text outputs. It handles context windows up to 128k tokens, understands over 140 languages, and offers improved math, reasoning, and chat capabilities, including structured outputs and function calling. Gemma 3 12B is the second largest in the family of Gemma 3 models after [Gemma 3 27B](google/gemma-3-27b-it)", + "description": "Gemma 3 introduces multimodality, supporting vision-language input and text outputs.", "context_length": 32768, - "architecture": { - "modality": "text+image-\u003Etext", - "input_modalities": ["text", "image"], - "output_modalities": ["text"], - "tokenizer": "Gemini", - "instruct_type": "gemma" - }, - "pricing": { - "prompt": "0", - "completion": "0", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 32768, - "max_completion_tokens": 8192, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": ["max_tokens", "seed", "temperature", "top_p"], - "default_parameters": {} + "hugging_face_id": "google/gemma-3-12b-it" }, { "id": "google/gemma-3-12b-it", - "canonical_slug": "google/gemma-3-12b-it", - "hugging_face_id": "google/gemma-3-12b-it", "name": "Google: Gemma 3 12B", - "created": 1741902625, - "description": "Gemma 3 introduces multimodality, supporting vision-language input and text outputs. It handles context windows up to 128k tokens, understands over 140 languages, and offers improved math, reasoning, and chat capabilities, including structured outputs and function calling. 
Gemma 3 12B is the second largest in the family of Gemma 3 models after [Gemma 3 27B](google/gemma-3-27b-it)", + "description": "Gemma 3 introduces multimodality, supporting vision-language input and text outputs.", "context_length": 131072, - "architecture": { - "modality": "text+image-\u003Etext", - "input_modalities": ["text", "image"], - "output_modalities": ["text"], - "tokenizer": "Gemini", - "instruct_type": "gemma" - }, - "pricing": { - "prompt": "0.00000003", - "completion": "0.0000001", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 131072, - "max_completion_tokens": 131072, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "logit_bias", - "max_tokens", - "min_p", - "presence_penalty", - "repetition_penalty", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "top_k", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "google/gemma-3-12b-it" }, { "id": "cohere/command-a", - "canonical_slug": "cohere/command-a-03-2025", - "hugging_face_id": "CohereForAI/c4ai-command-a-03-2025", "name": "Cohere: Command A", - "created": 1741894342, - "description": "Command A is an open-weights 111B parameter model with a 256k context window focused on delivering great performance across agentic, multilingual, and coding use cases.\nCompared to other leading proprietary and open-weights models Command A delivers maximum performance with minimum hardware costs, excelling on business-critical agentic and multilingual tasks.", + "description": "Command A is an open-weights 111B parameter model with a 256k context window focused on delivering great performance across agentic, multilingual, and coding use cases.", "context_length": 256000, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Other", - "instruct_type": null - }, - "pricing": { - "prompt": "0.0000025", - "completion": "0.00001", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 256000, - "max_completion_tokens": 8192, - "is_moderated": true - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "max_tokens", - "presence_penalty", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "top_k", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "CohereForAI/c4ai-command-a-03-2025" }, { "id": "openai/gpt-4o-mini-search-preview", - "canonical_slug": "openai/gpt-4o-mini-search-preview-2025-03-11", - "hugging_face_id": "", "name": "OpenAI: GPT-4o-mini Search Preview", - "created": 1741818122, - "description": "GPT-4o mini Search Preview is a specialized model for web search in Chat Completions. 
It is trained to understand and execute web search queries.", + "description": "GPT-4o mini Search Preview is a specialized model for web search in Chat Completions.", "context_length": 128000, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "GPT", - "instruct_type": null - }, - "pricing": { - "prompt": "0.00000015", - "completion": "0.0000006", - "request": "0.0275", - "image": "0.000217", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 128000, - "max_completion_tokens": 16384, - "is_moderated": true - }, - "per_request_limits": null, - "supported_parameters": [ - "max_tokens", - "response_format", - "structured_outputs", - "web_search_options" - ], - "default_parameters": {} + "hugging_face_id": "" }, { "id": "openai/gpt-4o-search-preview", - "canonical_slug": "openai/gpt-4o-search-preview-2025-03-11", - "hugging_face_id": "", "name": "OpenAI: GPT-4o Search Preview", - "created": 1741817949, - "description": "GPT-4o Search Previewis a specialized model for web search in Chat Completions. It is trained to understand and execute web search queries.", + "description": "GPT-4o Search Preview is a specialized model for web search in Chat Completions.", "context_length": 128000, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "GPT", - "instruct_type": null - }, - "pricing": { - "prompt": "0.0000025", - "completion": "0.00001", - "request": "0.035", - "image": "0.003613", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 128000, - "max_completion_tokens": 16384, - "is_moderated": true - }, - "per_request_limits": null, - "supported_parameters": [ - "max_tokens", - "response_format", - "structured_outputs", - "web_search_options" - ], - "default_parameters": {} + "hugging_face_id": "" }, { "id": "google/gemma-3-27b-it:free", - "canonical_slug": "google/gemma-3-27b-it", - "hugging_face_id": "", "name": "Google: Gemma 3 27B (free)", - "created": 1741756359, - "description": "Gemma 3 introduces multimodality, supporting vision-language input and text outputs. It handles context windows up to 128k tokens, understands over 140 languages, and offers improved math, reasoning, and chat capabilities, including structured outputs and function calling.
Gemma 3 27B is Google's latest open source model, successor to [Gemma 2](google/gemma-2-27b-it)", + "description": "Gemma 3 introduces multimodality, supporting vision-language input and text outputs.", "context_length": 131072, - "architecture": { - "modality": "text+image-\u003Etext", - "input_modalities": ["text", "image"], - "output_modalities": ["text"], - "tokenizer": "Gemini", - "instruct_type": "gemma" - }, - "pricing": { - "prompt": "0", - "completion": "0", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 131072, - "max_completion_tokens": null, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "max_tokens", - "presence_penalty", - "repetition_penalty", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "top_k", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "" }, { "id": "google/gemma-3-27b-it", - "canonical_slug": "google/gemma-3-27b-it", - "hugging_face_id": "", "name": "Google: Gemma 3 27B", - "created": 1741756359, - "description": "Gemma 3 introduces multimodality, supporting vision-language input and text outputs. It handles context windows up to 128k tokens, understands over 140 languages, and offers improved math, reasoning, and chat capabilities, including structured outputs and function calling. Gemma 3 27B is Google's latest open source model, successor to [Gemma 2](google/gemma-2-27b-it)", + "description": "Gemma 3 introduces multimodality, supporting vision-language input and text outputs.", "context_length": 131072, - "architecture": { - "modality": "text+image-\u003Etext", - "input_modalities": ["text", "image"], - "output_modalities": ["text"], - "tokenizer": "Gemini", - "instruct_type": "gemma" - }, - "pricing": { - "prompt": "0.00000009", - "completion": "0.00000016", - "request": "0", - "image": "0.0000256", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 131072, - "max_completion_tokens": 16384, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "logit_bias", - "max_tokens", - "min_p", - "presence_penalty", - "repetition_penalty", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_k", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "" }, { "id": "thedrummer/skyfall-36b-v2", - "canonical_slug": "thedrummer/skyfall-36b-v2", - "hugging_face_id": "TheDrummer/Skyfall-36B-v2", "name": "TheDrummer: Skyfall 36B V2", - "created": 1741636566, "description": "Skyfall 36B v2 is an enhanced iteration of Mistral Small 2501, specifically fine-tuned for improved creativity, nuanced writing, role-playing, and coherent storytelling.", "context_length": 32768, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Other", - "instruct_type": null - }, - "pricing": { - "prompt": "0.0000005", - "completion": "0.0000008", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 32768, - "max_completion_tokens": 32768, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "logit_bias", - "max_tokens", - "min_p", - "presence_penalty", - "repetition_penalty", - "seed", - "stop", - "temperature", - "top_k", - 
"top_p" - ], - "default_parameters": {} + "hugging_face_id": "TheDrummer/Skyfall-36B-v2" }, { "id": "microsoft/phi-4-multimodal-instruct", - "canonical_slug": "microsoft/phi-4-multimodal-instruct", - "hugging_face_id": "microsoft/Phi-4-multimodal-instruct", "name": "Microsoft: Phi 4 Multimodal Instruct", - "created": 1741396284, - "description": "Phi-4 Multimodal Instruct is a versatile 5.6B parameter foundation model that combines advanced reasoning and instruction-following capabilities across both text and visual inputs, providing accurate text outputs. The unified architecture enables efficient, low-latency inference, suitable for edge and mobile deployments. Phi-4 Multimodal Instruct supports text inputs in multiple languages including Arabic, Chinese, English, French, German, Japanese, Spanish, and more, with visual input optimized primarily for English. It delivers impressive performance on multimodal tasks involving mathematical, scientific, and document reasoning, providing developers and enterprises a powerful yet compact model for sophisticated interactive applications. For more information, see the [Phi-4 Multimodal blog post](https://azure.microsoft.com/en-us/blog/empowering-innovation-the-next-generation-of-the-phi-family/).\n", + "description": "Phi-4 Multimodal Instruct is a versatile 5.6B parameter foundation model that combines advanced reasoning and instruction-following capabilities across both text and visual inputs, providing accurate.", "context_length": 131072, - "architecture": { - "modality": "text+image-\u003Etext", - "input_modalities": ["text", "image"], - "output_modalities": ["text"], - "tokenizer": "Other", - "instruct_type": null - }, - "pricing": { - "prompt": "0.00000005", - "completion": "0.0000001", - "request": "0", - "image": "0.00017685", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 131072, - "max_completion_tokens": null, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "max_tokens", - "min_p", - "presence_penalty", - "repetition_penalty", - "response_format", - "seed", - "stop", - "temperature", - "top_k", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "microsoft/Phi-4-multimodal-instruct" }, { "id": "perplexity/sonar-reasoning-pro", - "canonical_slug": "perplexity/sonar-reasoning-pro", - "hugging_face_id": "", "name": "Perplexity: Sonar Reasoning Pro", - "created": 1741313308, - "description": "Note: Sonar Pro pricing includes Perplexity search pricing. See [details here](https://docs.perplexity.ai/guides/pricing#detailed-pricing-breakdown-for-sonar-reasoning-pro-and-sonar-pro)\n\nSonar Reasoning Pro is a premier reasoning model powered by DeepSeek R1 with Chain of Thought (CoT). 
Designed for advanced use cases, it supports in-depth, multi-step queries with a larger context window and can surface more citations per search, enabling more comprehensive and extensible responses.", + "description": "Sonar Reasoning Pro is a premier reasoning model powered by DeepSeek R1 with Chain of Thought (CoT).", "context_length": 128000, - "architecture": { - "modality": "text+image-\u003Etext", - "input_modalities": ["text", "image"], - "output_modalities": ["text"], - "tokenizer": "Other", - "instruct_type": "deepseek-r1" - }, - "pricing": { - "prompt": "0.000002", - "completion": "0.000008", - "request": "0", - "image": "0", - "web_search": "0.005", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 128000, - "max_completion_tokens": null, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "include_reasoning", - "max_tokens", - "presence_penalty", - "reasoning", - "temperature", - "top_k", - "top_p", - "web_search_options" - ], - "default_parameters": {} + "hugging_face_id": "" }, { "id": "perplexity/sonar-pro", - "canonical_slug": "perplexity/sonar-pro", - "hugging_face_id": "", "name": "Perplexity: Sonar Pro", - "created": 1741312423, - "description": "Note: Sonar Pro pricing includes Perplexity search pricing. See [details here](https://docs.perplexity.ai/guides/pricing#detailed-pricing-breakdown-for-sonar-reasoning-pro-and-sonar-pro)\n\nFor enterprises seeking more advanced capabilities, the Sonar Pro API can handle in-depth, multi-step queries with added extensibility, like double the number of citations per search as Sonar on average. Plus, with a larger context window, it can handle longer and more nuanced searches and follow-up questions. ", + "description": "For enterprises seeking more advanced capabilities, the Sonar Pro API can handle in-depth, multi-step queries with added extensibility.", "context_length": 200000, - "architecture": { - "modality": "text+image-\u003Etext", - "input_modalities": ["text", "image"], - "output_modalities": ["text"], - "tokenizer": "Other", - "instruct_type": null - }, - "pricing": { - "prompt": "0.000003", - "completion": "0.000015", - "request": "0", - "image": "0", - "web_search": "0.005", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 200000, - "max_completion_tokens": 8000, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "max_tokens", - "presence_penalty", - "temperature", - "top_k", - "top_p", - "web_search_options" - ], - "default_parameters": {} + "hugging_face_id": "" }, { "id": "perplexity/sonar-deep-research", - "canonical_slug": "perplexity/sonar-deep-research", - "hugging_face_id": "", "name": "Perplexity: Sonar Deep Research", - "created": 1741311246, - "description": "Sonar Deep Research is a research-focused model designed for multi-step retrieval, synthesis, and reasoning across complex topics. It autonomously searches, reads, and evaluates sources, refining its approach as it gathers information. This enables comprehensive report generation across domains like finance, technology, health, and current events.\n\nNotes on Pricing ([Source](https://docs.perplexity.ai/guides/pricing#detailed-pricing-breakdown-for-sonar-deep-research)) \n- Input tokens comprise of Prompt tokens (user prompt) + Citation tokens (these are processed tokens from running searches)\n- Deep Research runs multiple searches to conduct exhaustive research. Searches are priced at $5/1000 searches.
A request that does 30 searches will cost $0.15 in this step.\n- Reasoning is a distinct step in Deep Research since it does extensive automated reasoning through all the material it gathers during its research phase. Reasoning tokens here are a bit different than the CoTs in the answer - these are tokens that we use to reason through the research material prior to generating the outputs via the CoTs. Reasoning tokens are priced at $3/1M tokens", + "description": "Sonar Deep Research is a research-focused model designed for multi-step retrieval, synthesis, and reasoning across complex topics.", "context_length": 128000, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Other", - "instruct_type": "deepseek-r1" - }, - "pricing": { - "prompt": "0.000002", - "completion": "0.000008", - "request": "0", - "image": "0", - "web_search": "0.005", - "internal_reasoning": "0.000003" - }, - "top_provider": { - "context_length": 128000, - "max_completion_tokens": null, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "include_reasoning", - "max_tokens", - "presence_penalty", - "reasoning", - "temperature", - "top_k", - "top_p", - "web_search_options" - ], - "default_parameters": {} + "hugging_face_id": "" }, { "id": "qwen/qwq-32b", - "canonical_slug": "qwen/qwq-32b", - "hugging_face_id": "Qwen/QwQ-32B", "name": "Qwen: QwQ 32B", - "created": 1741208814, - "description": "QwQ is the reasoning model of the Qwen series. Compared with conventional instruction-tuned models, QwQ, which is capable of thinking and reasoning, can achieve significantly enhanced performance in downstream tasks, especially hard problems. QwQ-32B is the medium-sized reasoning model, which is capable of achieving competitive performance against state-of-the-art reasoning models, e.g., DeepSeek-R1, o1-mini.", + "description": "QwQ is the reasoning model of the Qwen series.", "context_length": 32768, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Qwen", - "instruct_type": "qwq" - }, - "pricing": { - "prompt": "0.00000015", - "completion": "0.0000004", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 32768, - "max_completion_tokens": null, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "include_reasoning", - "logit_bias", - "max_tokens", - "min_p", - "presence_penalty", - "reasoning", - "repetition_penalty", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_k", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "Qwen/QwQ-32B" }, { "id": "google/gemini-2.0-flash-lite-001", - "canonical_slug": "google/gemini-2.0-flash-lite-001", - "hugging_face_id": "", "name": "Google: Gemini 2.0 Flash Lite", - "created": 1740506212, - "description": "Gemini 2.0 Flash Lite offers a significantly faster time to first token (TTFT) compared to [Gemini Flash 1.5](/google/gemini-flash-1.5), while maintaining quality on par with larger models like [Gemini Pro 1.5](/google/gemini-pro-1.5), all at extremely economical token prices.", + "description": "Gemini 2.0 Flash Lite offers a significantly faster time to first token (TTFT) compared to [Gemini Flash 1.5](/google/gemini-flash-1.5), while maintaining quality on 
par with larger models like [Gemini Pro 1.5](/google/gemini-pro-1.5).", "context_length": 1048576, - "architecture": { - "modality": "text+image-\u003Etext", - "input_modalities": ["text", "image", "file", "audio", "video"], - "output_modalities": ["text"], - "tokenizer": "Gemini", - "instruct_type": null - }, - "pricing": { - "prompt": "0.000000075", - "completion": "0.0000003", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 1048576, - "max_completion_tokens": 8192, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "max_tokens", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_p" - ], - "default_parameters": { - "temperature": null, - "top_p": null, - "frequency_penalty": null - } + "hugging_face_id": "" }, { "id": "anthropic/claude-3.7-sonnet:thinking", - "canonical_slug": "anthropic/claude-3-7-sonnet-20250219", - "hugging_face_id": "", "name": "Anthropic: Claude 3.7 Sonnet (thinking)", - "created": 1740422110, - "description": "Claude 3.7 Sonnet is an advanced large language model with improved reasoning, coding, and problem-solving capabilities. It introduces a hybrid reasoning approach, allowing users to choose between rapid responses and extended, step-by-step processing for complex tasks. The model demonstrates notable improvements in coding, particularly in front-end development and full-stack updates, and excels in agentic workflows, where it can autonomously navigate multi-step processes. \n\nClaude 3.7 Sonnet maintains performance parity with its predecessor in standard mode while offering an extended reasoning mode for enhanced accuracy in math, coding, and instruction-following tasks.\n\nRead more at the [blog post here](https://www.anthropic.com/news/claude-3-7-sonnet)", + "description": "Claude 3.7 Sonnet is an advanced large language model with improved reasoning, coding, and problem-solving capabilities.", "context_length": 200000, - "architecture": { - "modality": "text+image-\u003Etext", - "input_modalities": ["text", "image", "file"], - "output_modalities": ["text"], - "tokenizer": "Claude", - "instruct_type": null - }, - "pricing": { - "prompt": "0.000003", - "completion": "0.000015", - "request": "0", - "image": "0.0048", - "web_search": "0", - "internal_reasoning": "0", - "input_cache_read": "0.0000003", - "input_cache_write": "0.00000375" - }, - "top_provider": { - "context_length": 200000, - "max_completion_tokens": 64000, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "include_reasoning", - "max_tokens", - "reasoning", - "stop", - "temperature", - "tool_choice", - "tools", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "" }, { "id": "anthropic/claude-3.7-sonnet", - "canonical_slug": "anthropic/claude-3-7-sonnet-20250219", - "hugging_face_id": "", "name": "Anthropic: Claude 3.7 Sonnet", - "created": 1740422110, - "description": "Claude 3.7 Sonnet is an advanced large language model with improved reasoning, coding, and problem-solving capabilities. It introduces a hybrid reasoning approach, allowing users to choose between rapid responses and extended, step-by-step processing for complex tasks. The model demonstrates notable improvements in coding, particularly in front-end development and full-stack updates, and excels in agentic workflows, where it can autonomously navigate multi-step processes.
\n\nClaude 3.7 Sonnet maintains performance parity with its predecessor in standard mode while offering an extended reasoning mode for enhanced accuracy in math, coding, and instruction-following tasks.\n\nRead more at the [blog post here](https://www.anthropic.com/news/claude-3-7-sonnet)", + "description": "Claude 3.7 Sonnet is an advanced large language model with improved reasoning, coding, and problem-solving capabilities.", "context_length": 200000, - "architecture": { - "modality": "text+image-\u003Etext", - "input_modalities": ["text", "image", "file"], - "output_modalities": ["text"], - "tokenizer": "Claude", - "instruct_type": null - }, - "pricing": { - "prompt": "0.000003", - "completion": "0.000015", - "request": "0", - "image": "0.0048", - "web_search": "0", - "internal_reasoning": "0", - "input_cache_read": "0.0000003", - "input_cache_write": "0.00000375" - }, - "top_provider": { - "context_length": 200000, - "max_completion_tokens": 64000, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "include_reasoning", - "max_tokens", - "reasoning", - "stop", - "temperature", - "tool_choice", - "tools", - "top_k", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "" }, { "id": "mistralai/mistral-saba", - "canonical_slug": "mistralai/mistral-saba-2502", - "hugging_face_id": "", "name": "Mistral: Saba", - "created": 1739803239, - "description": "Mistral Saba is a 24B-parameter language model specifically designed for the Middle East and South Asia, delivering accurate and contextually relevant responses while maintaining efficient performance. Trained on curated regional datasets, it supports multiple Indian-origin languages—including Tamil and Malayalam—alongside Arabic. This makes it a versatile option for a range of regional and multilingual applications. Read more at the blog post [here](https://mistral.ai/en/news/mistral-saba)", + "description": "Mistral Saba is a 24B-parameter language model specifically designed for the Middle East and South Asia, delivering accurate and contextually relevant responses while maintaining efficient performance.", "context_length": 32768, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Mistral", - "instruct_type": null - }, - "pricing": { - "prompt": "0.0000002", - "completion": "0.0000006", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 32768, - "max_completion_tokens": null, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "max_tokens", - "presence_penalty", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_p" - ], - "default_parameters": { - "temperature": 0.3 - } + "hugging_face_id": "" }, { "id": "meta-llama/llama-guard-3-8b", - "canonical_slug": "meta-llama/llama-guard-3-8b", - "hugging_face_id": "meta-llama/Llama-Guard-3-8B", "name": "Llama Guard 3 8B", - "created": 1739401318, - "description": "Llama Guard 3 is a Llama-3.1-8B pretrained model, fine-tuned for content safety classification. Similar to previous versions, it can be used to classify content in both LLM inputs (prompt classification) and in LLM responses (response classification). 
It acts as an LLM – it generates text in its output that indicates whether a given prompt or response is safe or unsafe, and if unsafe, it also lists the content categories violated.\n\nLlama Guard 3 was aligned to safeguard against the MLCommons standardized hazards taxonomy and designed to support Llama 3.1 capabilities. Specifically, it provides content moderation in 8 languages, and was optimized to support safety and security for search and code interpreter tool calls.\n", + "description": "Llama Guard 3 is a Llama-3.1-8B pretrained model, fine-tuned for content safety classification.", "context_length": 131072, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Llama3", - "instruct_type": "none" - }, - "pricing": { - "prompt": "0.00000002", - "completion": "0.00000006", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 131072, - "max_completion_tokens": null, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "logit_bias", - "max_tokens", - "min_p", - "presence_penalty", - "repetition_penalty", - "response_format", - "seed", - "stop", - "temperature", - "top_k", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "meta-llama/Llama-Guard-3-8B" }, { "id": "openai/o3-mini-high", - "canonical_slug": "openai/o3-mini-high-2025-01-31", - "hugging_face_id": "", "name": "OpenAI: o3 Mini High", - "created": 1739372611, - "description": "OpenAI o3-mini-high is the same model as [o3-mini](/openai/o3-mini) with reasoning_effort set to high. \n\no3-mini is a cost-efficient language model optimized for STEM reasoning tasks, particularly excelling in science, mathematics, and coding. The model features three adjustable reasoning effort levels and supports key developer capabilities including function calling, structured outputs, and streaming, though it does not include vision processing capabilities.\n\nThe model demonstrates significant improvements over its predecessor, with expert testers preferring its responses 56% of the time and noting a 39% reduction in major errors on complex questions. 
With medium reasoning effort settings, o3-mini matches the performance of the larger o1 model on challenging reasoning evaluations like AIME and GPQA, while maintaining lower latency and cost.", + "description": "OpenAI o3-mini-high is the same model as [o3-mini](/openai/o3-mini) with reasoning_effort set to high.", "context_length": 200000, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text", "file"], - "output_modalities": ["text"], - "tokenizer": "GPT", - "instruct_type": null - }, - "pricing": { - "prompt": "0.0000011", - "completion": "0.0000044", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0", - "input_cache_read": "0.00000055" - }, - "top_provider": { - "context_length": 200000, - "max_completion_tokens": 100000, - "is_moderated": true - }, - "per_request_limits": null, - "supported_parameters": [ - "max_tokens", - "response_format", - "seed", - "structured_outputs", - "tool_choice", - "tools" - ], - "default_parameters": {} + "hugging_face_id": "" }, { "id": "google/gemini-2.0-flash-001", - "canonical_slug": "google/gemini-2.0-flash-001", - "hugging_face_id": "", "name": "Google: Gemini 2.0 Flash", - "created": 1738769413, - "description": "Gemini Flash 2.0 offers a significantly faster time to first token (TTFT) compared to [Gemini Flash 1.5](/google/gemini-flash-1.5), while maintaining quality on par with larger models like [Gemini Pro 1.5](/google/gemini-pro-1.5). It introduces notable enhancements in multimodal understanding, coding capabilities, complex instruction following, and function calling. These advancements come together to deliver more seamless and robust agentic experiences.", + "description": "Gemini Flash 2.0 offers a significantly faster time to first token (TTFT) compared to [Gemini Flash 1.5](/google/gemini-flash-1.5), while maintaining quality on par with larger models like [Gemini Pro 1.5](/google/gemini-pro-1.5).", "context_length": 1048576, - "architecture": { - "modality": "text+image-\u003Etext", - "input_modalities": ["text", "image", "file", "audio", "video"], - "output_modalities": ["text"], - "tokenizer": "Gemini", - "instruct_type": null - }, - "pricing": { - "prompt": "0.0000001", - "completion": "0.0000004", - "request": "0", - "image": "0.0000258", - "audio": "0.0000007", - "web_search": "0", - "internal_reasoning": "0", - "input_cache_read": "0.000000025", - "input_cache_write": "0.0000001833" - }, - "top_provider": { - "context_length": 1048576, - "max_completion_tokens": 8192, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "max_tokens", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_p" - ], - "default_parameters": { - "temperature": null, - "top_p": null, - "frequency_penalty": null - } + "hugging_face_id": "" }, { "id": "qwen/qwen-vl-plus", - "canonical_slug": "qwen/qwen-vl-plus", - "hugging_face_id": "", "name": "Qwen: Qwen VL Plus", - "created": 1738731255, - "description": "Qwen's Enhanced Large Visual Language Model. Significantly upgraded for detailed recognition capabilities and text recognition abilities, supporting ultra-high pixel resolutions up to millions of pixels and extreme aspect ratios for image input.
It delivers significant performance across a broad range of visual tasks.\n", + "description": "Qwen's Enhanced Large Visual Language Model.", "context_length": 7500, - "architecture": { - "modality": "text+image-\u003Etext", - "input_modalities": ["text", "image"], - "output_modalities": ["text"], - "tokenizer": "Qwen", - "instruct_type": null - }, - "pricing": { - "prompt": "0.00000021", - "completion": "0.00000063", - "request": "0", - "image": "0.0002688", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 7500, - "max_completion_tokens": 1500, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "max_tokens", - "presence_penalty", - "response_format", - "seed", - "temperature", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "" }, { "id": "aion-labs/aion-1.0", - "canonical_slug": "aion-labs/aion-1.0", - "hugging_face_id": "", "name": "AionLabs: Aion-1.0", - "created": 1738697557, - "description": "Aion-1.0 is a multi-model system designed for high performance across various tasks, including reasoning and coding. It is built on DeepSeek-R1, augmented with additional models and techniques such as Tree of Thoughts (ToT) and Mixture of Experts (MoE). It is Aion Lab's most powerful reasoning model.", + "description": "Aion-1.0 is a multi-model system designed for high performance across various tasks, including reasoning and coding.", "context_length": 131072, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Other", - "instruct_type": null - }, - "pricing": { - "prompt": "0.000004", - "completion": "0.000008", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 131072, - "max_completion_tokens": 32768, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "include_reasoning", - "max_tokens", - "reasoning", - "temperature", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "" }, { "id": "aion-labs/aion-1.0-mini", - "canonical_slug": "aion-labs/aion-1.0-mini", - "hugging_face_id": "FuseAI/FuseO1-DeepSeekR1-QwQ-SkyT1-32B-Preview", "name": "AionLabs: Aion-1.0-Mini", - "created": 1738697107, - "description": "Aion-1.0-Mini 32B parameter model is a distilled version of the DeepSeek-R1 model, designed for strong performance in reasoning domains such as mathematics, coding, and logic. 
It is a modified variant of a FuseAI model that outperforms R1-Distill-Qwen-32B and R1-Distill-Llama-70B, with benchmark results available on its [Hugging Face page](https://huggingface.co/FuseAI/FuseO1-DeepSeekR1-QwQ-SkyT1-32B-Preview), independently replicated for verification.", + "description": "Aion-1.0-Mini 32B parameter model is a distilled version of the DeepSeek-R1 model, designed for strong performance in reasoning domains such as mathematics, coding, and logic.", "context_length": 131072, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Other", - "instruct_type": null - }, - "pricing": { - "prompt": "0.0000007", - "completion": "0.0000014", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 131072, - "max_completion_tokens": 32768, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "include_reasoning", - "max_tokens", - "reasoning", - "temperature", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "FuseAI/FuseO1-DeepSeekR1-QwQ-SkyT1-32B-Preview" }, { "id": "aion-labs/aion-rp-llama-3.1-8b", - "canonical_slug": "aion-labs/aion-rp-llama-3.1-8b", - "hugging_face_id": "", "name": "AionLabs: Aion-RP 1.0 (8B)", - "created": 1738696718, - "description": "Aion-RP-Llama-3.1-8B ranks the highest in the character evaluation portion of the RPBench-Auto benchmark, a roleplaying-specific variant of Arena-Hard-Auto, where LLMs evaluate each other’s responses. It is a fine-tuned base model rather than an instruct model, designed to produce more natural and varied writing.", + "description": "Aion-RP-Llama-3.1-8B ranks the highest in the character evaluation portion of the RPBench-Auto benchmark, a roleplaying-specific variant of Arena-Hard-Auto, where LLMs evaluate each other’s responses.", "context_length": 32768, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Other", - "instruct_type": null - }, - "pricing": { - "prompt": "0.0000002", - "completion": "0.0000002", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 32768, - "max_completion_tokens": 32768, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": ["max_tokens", "temperature", "top_p"], - "default_parameters": {} + "hugging_face_id": "" }, { "id": "qwen/qwen-vl-max", - "canonical_slug": "qwen/qwen-vl-max-2025-01-25", - "hugging_face_id": "", "name": "Qwen: Qwen VL Max", - "created": 1738434304, - "description": "Qwen VL Max is a visual understanding model with 7500 tokens context length. 
It excels in delivering optimal performance for a broader spectrum of complex tasks.\n", + "description": "Qwen VL Max is a visual understanding model with 7500 tokens context length.", "context_length": 131072, - "architecture": { - "modality": "text+image-\u003Etext", - "input_modalities": ["text", "image"], - "output_modalities": ["text"], - "tokenizer": "Qwen", - "instruct_type": null - }, - "pricing": { - "prompt": "0.0000008", - "completion": "0.0000032", - "request": "0", - "image": "0.001024", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 131072, - "max_completion_tokens": 8192, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "max_tokens", - "presence_penalty", - "response_format", - "seed", - "temperature", - "tool_choice", - "tools", - "top_p" - ], - "default_parameters": { - "temperature": null, - "top_p": null, - "frequency_penalty": null - } + "hugging_face_id": "" }, { "id": "qwen/qwen-turbo", - "canonical_slug": "qwen/qwen-turbo-2024-11-01", - "hugging_face_id": "", "name": "Qwen: Qwen-Turbo", - "created": 1738410974, "description": "Qwen-Turbo, based on Qwen2.5, is a 1M context model that provides fast speed and low cost, suitable for simple tasks.", "context_length": 1000000, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Qwen", - "instruct_type": null - }, - "pricing": { - "prompt": "0.00000005", - "completion": "0.0000002", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0", - "input_cache_read": "0.00000002" - }, - "top_provider": { - "context_length": 1000000, - "max_completion_tokens": 8192, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "max_tokens", - "presence_penalty", - "response_format", - "seed", - "temperature", - "tool_choice", - "tools", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "" }, { "id": "qwen/qwen2.5-vl-72b-instruct", - "canonical_slug": "qwen/qwen2.5-vl-72b-instruct", - "hugging_face_id": "Qwen/Qwen2.5-VL-72B-Instruct", "name": "Qwen: Qwen2.5 VL 72B Instruct", - "created": 1738410311, - "description": "Qwen2.5-VL is proficient in recognizing common objects such as flowers, birds, fish, and insects. 
It is also highly capable of analyzing texts, charts, icons, graphics, and layouts within images.", + "description": "Qwen2.5-VL is proficient in recognizing common objects such as flowers, birds, fish, and insects.", "context_length": 32768, - "architecture": { - "modality": "text+image-\u003Etext", - "input_modalities": ["text", "image"], - "output_modalities": ["text"], - "tokenizer": "Qwen", - "instruct_type": null - }, - "pricing": { - "prompt": "0.00000008", - "completion": "0.00000033", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 32768, - "max_completion_tokens": 32768, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "logit_bias", - "max_tokens", - "min_p", - "presence_penalty", - "repetition_penalty", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "top_k", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "Qwen/Qwen2.5-VL-72B-Instruct" }, { "id": "qwen/qwen-plus", - "canonical_slug": "qwen/qwen-plus-2025-01-25", - "hugging_face_id": "", "name": "Qwen: Qwen-Plus", - "created": 1738409840, "description": "Qwen-Plus, based on the Qwen2.5 foundation model, is a 131K context model with a balanced performance, speed, and cost combination.", "context_length": 131072, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Qwen", - "instruct_type": null - }, - "pricing": { - "prompt": "0.0000004", - "completion": "0.0000012", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0", - "input_cache_read": "0.00000016" - }, - "top_provider": { - "context_length": 131072, - "max_completion_tokens": 8192, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "max_tokens", - "presence_penalty", - "response_format", - "seed", - "temperature", - "tool_choice", - "tools", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "" }, { "id": "qwen/qwen-max", - "canonical_slug": "qwen/qwen-max-2025-01-25", - "hugging_face_id": "", "name": "Qwen: Qwen-Max ", - "created": 1738402289, - "description": "Qwen-Max, based on Qwen2.5, provides the best inference performance among [Qwen models](/qwen), especially for complex multi-step tasks. It's a large-scale MoE model that has been pretrained on over 20 trillion tokens and further post-trained with curated Supervised Fine-Tuning (SFT) and Reinforcement Learning from Human Feedback (RLHF) methodologies. 
The parameter count is unknown.", + "description": "Qwen-Max, based on Qwen2.5, provides the best inference performance among [Qwen models](/qwen), especially for complex multi-step tasks.", "context_length": 32768, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Qwen", - "instruct_type": null - }, - "pricing": { - "prompt": "0.0000016", - "completion": "0.0000064", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0", - "input_cache_read": "0.00000064" - }, - "top_provider": { - "context_length": 32768, - "max_completion_tokens": 8192, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "max_tokens", - "presence_penalty", - "response_format", - "seed", - "temperature", - "tool_choice", - "tools", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "" }, { "id": "openai/o3-mini", - "canonical_slug": "openai/o3-mini-2025-01-31", - "hugging_face_id": "", "name": "OpenAI: o3 Mini", - "created": 1738351721, - "description": "OpenAI o3-mini is a cost-efficient language model optimized for STEM reasoning tasks, particularly excelling in science, mathematics, and coding.\n\nThis model supports the `reasoning_effort` parameter, which can be set to \"high\", \"medium\", or \"low\" to control the thinking time of the model. The default is \"medium\". OpenRouter also offers the model slug `openai/o3-mini-high` to default the parameter to \"high\".\n\nThe model features three adjustable reasoning effort levels and supports key developer capabilities including function calling, structured outputs, and streaming, though it does not include vision processing capabilities.\n\nThe model demonstrates significant improvements over its predecessor, with expert testers preferring its responses 56% of the time and noting a 39% reduction in major errors on complex questions. With medium reasoning effort settings, o3-mini matches the performance of the larger o1 model on challenging reasoning evaluations like AIME and GPQA, while maintaining lower latency and cost.", + "description": "OpenAI o3-mini is a cost-efficient language model optimized for STEM reasoning tasks, particularly excelling in science, mathematics, and coding.", "context_length": 200000, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text", "file"], - "output_modalities": ["text"], - "tokenizer": "GPT", - "instruct_type": null - }, - "pricing": { - "prompt": "0.0000011", - "completion": "0.0000044", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0", - "input_cache_read": "0.00000055" - }, - "top_provider": { - "context_length": 200000, - "max_completion_tokens": 100000, - "is_moderated": true - }, - "per_request_limits": null, - "supported_parameters": [ - "max_tokens", - "response_format", - "seed", - "structured_outputs", - "tool_choice", - "tools" - ], - "default_parameters": {} + "hugging_face_id": "" }, { "id": "mistralai/mistral-small-24b-instruct-2501:free", - "canonical_slug": "mistralai/mistral-small-24b-instruct-2501", - "hugging_face_id": "mistralai/Mistral-Small-24B-Instruct-2501", "name": "Mistral: Mistral Small 3 (free)", - "created": 1738255409, - "description": "Mistral Small 3 is a 24B-parameter language model optimized for low-latency performance across common AI tasks. 
Released under the Apache 2.0 license, it features both pre-trained and instruction-tuned versions designed for efficient local deployment.\n\nThe model achieves 81% accuracy on the MMLU benchmark and performs competitively with larger models like Llama 3.3 70B and Qwen 32B, while operating at three times the speed on equivalent hardware. [Read the blog post about the model here.](https://mistral.ai/news/mistral-small-3/)", + "description": "Mistral Small 3 is a 24B-parameter language model optimized for low-latency performance across common AI tasks.", "context_length": 32768, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Mistral", - "instruct_type": null - }, - "pricing": { - "prompt": "0", - "completion": "0", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 32768, - "max_completion_tokens": null, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "max_tokens", - "presence_penalty", - "repetition_penalty", - "seed", - "stop", - "temperature", - "top_k", - "top_p" - ], - "default_parameters": { - "temperature": 0.3 - } + "hugging_face_id": "mistralai/Mistral-Small-24B-Instruct-2501" }, { "id": "mistralai/mistral-small-24b-instruct-2501", - "canonical_slug": "mistralai/mistral-small-24b-instruct-2501", - "hugging_face_id": "mistralai/Mistral-Small-24B-Instruct-2501", "name": "Mistral: Mistral Small 3", - "created": 1738255409, - "description": "Mistral Small 3 is a 24B-parameter language model optimized for low-latency performance across common AI tasks. Released under the Apache 2.0 license, it features both pre-trained and instruction-tuned versions designed for efficient local deployment.\n\nThe model achieves 81% accuracy on the MMLU benchmark and performs competitively with larger models like Llama 3.3 70B and Qwen 32B, while operating at three times the speed on equivalent hardware. 
[Read the blog post about the model here.](https://mistral.ai/news/mistral-small-3/)", + "description": "Mistral Small 3 is a 24B-parameter language model optimized for low-latency performance across common AI tasks.", "context_length": 32768, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Mistral", - "instruct_type": null - }, - "pricing": { - "prompt": "0.00000005", - "completion": "0.00000008", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 32768, - "max_completion_tokens": 16384, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "logit_bias", - "max_tokens", - "min_p", - "presence_penalty", - "repetition_penalty", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_k", - "top_p" - ], - "default_parameters": { - "temperature": 0.3 - } + "hugging_face_id": "mistralai/Mistral-Small-24B-Instruct-2501" }, { "id": "deepseek/deepseek-r1-distill-qwen-32b", - "canonical_slug": "deepseek/deepseek-r1-distill-qwen-32b", - "hugging_face_id": "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B", "name": "DeepSeek: R1 Distill Qwen 32B", - "created": 1738194830, - "description": "DeepSeek R1 Distill Qwen 32B is a distilled large language model based on [Qwen 2.5 32B](https://huggingface.co/Qwen/Qwen2.5-32B), using outputs from [DeepSeek R1](/deepseek/deepseek-r1). It outperforms OpenAI's o1-mini across various benchmarks, achieving new state-of-the-art results for dense models.\\n\\nOther benchmark results include:\\n\\n- AIME 2024 pass@1: 72.6\\n- MATH-500 pass@1: 94.3\\n- CodeForces Rating: 1691\\n\\nThe model leverages fine-tuning from DeepSeek R1's outputs, enabling competitive performance comparable to larger frontier models.", + "description": "DeepSeek R1 Distill Qwen 32B is a distilled large language model based on [Qwen 2.5 32B](https://huggingface.co/Qwen/Qwen2.5-32B), using outputs from [DeepSeek R1](/deepseek/deepseek-r1).", "context_length": 131072, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Qwen", - "instruct_type": "deepseek-r1" - }, - "pricing": { - "prompt": "0.00000027", - "completion": "0.00000027", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 131072, - "max_completion_tokens": null, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "include_reasoning", - "max_tokens", - "min_p", - "presence_penalty", - "reasoning", - "repetition_penalty", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "top_k", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B" }, { "id": "deepseek/deepseek-r1-distill-qwen-14b", - "canonical_slug": "deepseek/deepseek-r1-distill-qwen-14b", - "hugging_face_id": "deepseek-ai/DeepSeek-R1-Distill-Qwen-14B", "name": "DeepSeek: R1 Distill Qwen 14B", - "created": 1738193940, - "description": "DeepSeek R1 Distill Qwen 14B is a distilled large language model based on [Qwen 2.5 14B](https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Qwen-14B), using outputs from [DeepSeek R1](/deepseek/deepseek-r1). 
It outperforms OpenAI's o1-mini across various benchmarks, achieving new state-of-the-art results for dense models.\n\nOther benchmark results include:\n\n- AIME 2024 pass@1: 69.7\n- MATH-500 pass@1: 93.9\n- CodeForces Rating: 1481\n\nThe model leverages fine-tuning from DeepSeek R1's outputs, enabling competitive performance comparable to larger frontier models.", + "description": "DeepSeek R1 Distill Qwen 14B is a distilled large language model based on [Qwen 2.5 14B](https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Qwen-14B), using outputs from [DeepSeek R1](/deepseek/deepseek-r1).", "context_length": 32768, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Qwen", - "instruct_type": "deepseek-r1" - }, - "pricing": { - "prompt": "0.00000015", - "completion": "0.00000015", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 32768, - "max_completion_tokens": 16384, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "include_reasoning", - "logit_bias", - "max_tokens", - "min_p", - "presence_penalty", - "reasoning", - "repetition_penalty", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "top_k", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "deepseek-ai/DeepSeek-R1-Distill-Qwen-14B" }, { "id": "perplexity/sonar-reasoning", - "canonical_slug": "perplexity/sonar-reasoning", - "hugging_face_id": "", "name": "Perplexity: Sonar Reasoning", - "created": 1738131107, - "description": "Sonar Reasoning is a reasoning model provided by Perplexity based on [DeepSeek R1](/deepseek/deepseek-r1).\n\nIt allows developers to utilize long chain of thought with built-in web search. Sonar Reasoning is uncensored and hosted in US datacenters. ", + "description": "Sonar Reasoning is a reasoning model provided by Perplexity based on [DeepSeek R1](/deepseek/deepseek-r1).", "context_length": 127000, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Other", - "instruct_type": "deepseek-r1" - }, - "pricing": { - "prompt": "0.000001", - "completion": "0.000005", - "request": "0.005", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 127000, - "max_completion_tokens": null, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "include_reasoning", - "max_tokens", - "presence_penalty", - "reasoning", - "temperature", - "top_k", - "top_p", - "web_search_options" - ], - "default_parameters": {} + "hugging_face_id": "" }, { "id": "perplexity/sonar", - "canonical_slug": "perplexity/sonar", - "hugging_face_id": "", "name": "Perplexity: Sonar", - "created": 1738013808, - "description": "Sonar is lightweight, affordable, fast, and simple to use — now featuring citations and the ability to customize sources.
It is designed for companies seeking to integrate lightweight question-and-answer features optimized for speed.", + "description": "Sonar is lightweight, affordable, fast, and simple to use — now featuring citations and the ability to customize sources.", "context_length": 127072, - "architecture": { - "modality": "text+image-\u003Etext", - "input_modalities": ["text", "image"], - "output_modalities": ["text"], - "tokenizer": "Other", - "instruct_type": null - }, - "pricing": { - "prompt": "0.000001", - "completion": "0.000001", - "request": "0.005", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 127072, - "max_completion_tokens": null, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "max_tokens", - "presence_penalty", - "temperature", - "top_k", - "top_p", - "web_search_options" - ], - "default_parameters": {} + "hugging_face_id": "" }, { "id": "deepseek/deepseek-r1-distill-llama-70b:free", - "canonical_slug": "deepseek/deepseek-r1-distill-llama-70b", - "hugging_face_id": "deepseek-ai/DeepSeek-R1-Distill-Llama-70B", "name": "DeepSeek: R1 Distill Llama 70B (free)", - "created": 1737663169, - "description": "DeepSeek R1 Distill Llama 70B is a distilled large language model based on [Llama-3.3-70B-Instruct](/meta-llama/llama-3.3-70b-instruct), using outputs from [DeepSeek R1](/deepseek/deepseek-r1). The model combines advanced distillation techniques to achieve high performance across multiple benchmarks, including:\n\n- AIME 2024 pass@1: 70.0\n- MATH-500 pass@1: 94.5\n- CodeForces Rating: 1633\n\nThe model leverages fine-tuning from DeepSeek R1's outputs, enabling competitive performance comparable to larger frontier models.", + "description": "DeepSeek R1 Distill Llama 70B is a distilled large language model based on [Llama-3.3-70B-Instruct](/meta-llama/llama-3.3-70b-instruct), using outputs from [DeepSeek R1](/deepseek/deepseek-r1).", "context_length": 8192, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Llama3", - "instruct_type": "deepseek-r1" - }, - "pricing": { - "prompt": "0", - "completion": "0", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 8192, - "max_completion_tokens": 4096, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "include_reasoning", - "logit_bias", - "max_tokens", - "min_p", - "presence_penalty", - "reasoning", - "repetition_penalty", - "seed", - "stop", - "temperature", - "top_k", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "deepseek-ai/DeepSeek-R1-Distill-Llama-70B" }, { "id": "deepseek/deepseek-r1-distill-llama-70b", - "canonical_slug": "deepseek/deepseek-r1-distill-llama-70b", - "hugging_face_id": "deepseek-ai/DeepSeek-R1-Distill-Llama-70B", "name": "DeepSeek: R1 Distill Llama 70B", - "created": 1737663169, - "description": "DeepSeek R1 Distill Llama 70B is a distilled large language model based on [Llama-3.3-70B-Instruct](/meta-llama/llama-3.3-70b-instruct), using outputs from [DeepSeek R1](/deepseek/deepseek-r1). 
The model combines advanced distillation techniques to achieve high performance across multiple benchmarks, including:\n\n- AIME 2024 pass@1: 70.0\n- MATH-500 pass@1: 94.5\n- CodeForces Rating: 1633\n\nThe model leverages fine-tuning from DeepSeek R1's outputs, enabling competitive performance comparable to larger frontier models.", + "description": "DeepSeek R1 Distill Llama 70B is a distilled large language model based on [Llama-3.3-70B-Instruct](/meta-llama/llama-3.3-70b-instruct), using outputs from [DeepSeek R1](/deepseek/deepseek-r1).", "context_length": 131072, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Llama3", - "instruct_type": "deepseek-r1" - }, - "pricing": { - "prompt": "0.00000003", - "completion": "0.00000013", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 131072, - "max_completion_tokens": 131072, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "include_reasoning", - "logit_bias", - "max_tokens", - "min_p", - "presence_penalty", - "reasoning", - "repetition_penalty", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_k", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "deepseek-ai/DeepSeek-R1-Distill-Llama-70B" }, { "id": "deepseek/deepseek-r1:free", - "canonical_slug": "deepseek/deepseek-r1", - "hugging_face_id": "deepseek-ai/DeepSeek-R1", "name": "DeepSeek: R1 (free)", - "created": 1737381095, - "description": "DeepSeek R1 is here: Performance on par with [OpenAI o1](/openai/o1), but open-sourced and with fully open reasoning tokens. It's 671B parameters in size, with 37B active in an inference pass.\n\nFully open-source model & [technical report](https://api-docs.deepseek.com/news/news250120).\n\nMIT licensed: Distill & commercialize freely!", + "description": "DeepSeek R1 is here: Performance on par with [OpenAI o1](/openai/o1), but open-sourced and with fully open reasoning tokens.", "context_length": 163840, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "DeepSeek", - "instruct_type": "deepseek-r1" - }, - "pricing": { - "prompt": "0", - "completion": "0", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 163840, - "max_completion_tokens": null, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "include_reasoning", - "max_tokens", - "presence_penalty", - "reasoning", - "repetition_penalty", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "top_k", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "deepseek-ai/DeepSeek-R1" }, { "id": "deepseek/deepseek-r1", - "canonical_slug": "deepseek/deepseek-r1", - "hugging_face_id": "deepseek-ai/DeepSeek-R1", "name": "DeepSeek: R1", - "created": 1737381095, - "description": "DeepSeek R1 is here: Performance on par with [OpenAI o1](/openai/o1), but open-sourced and with fully open reasoning tokens. 
It's 671B parameters in size, with 37B active in an inference pass.\n\nFully open-source model & [technical report](https://api-docs.deepseek.com/news/news250120).\n\nMIT licensed: Distill & commercialize freely!", + "description": "DeepSeek R1 is here: Performance on par with [OpenAI o1](/openai/o1), but open-sourced and with fully open reasoning tokens.", "context_length": 163840, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "DeepSeek", - "instruct_type": "deepseek-r1" - }, - "pricing": { - "prompt": "0.0000003", - "completion": "0.0000012", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 163840, - "max_completion_tokens": null, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "include_reasoning", - "max_tokens", - "min_p", - "presence_penalty", - "reasoning", - "repetition_penalty", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_k", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "deepseek-ai/DeepSeek-R1" }, { "id": "minimax/minimax-01", - "canonical_slug": "minimax/minimax-01", - "hugging_face_id": "MiniMaxAI/MiniMax-Text-01", "name": "MiniMax: MiniMax-01", - "created": 1736915462, - "description": "MiniMax-01 is a combines MiniMax-Text-01 for text generation and MiniMax-VL-01 for image understanding. It has 456 billion parameters, with 45.9 billion parameters activated per inference, and can handle a context of up to 4 million tokens.\n\nThe text model adopts a hybrid architecture that combines Lightning Attention, Softmax Attention, and Mixture-of-Experts (MoE). The image model adopts the “ViT-MLP-LLM” framework and is trained on top of the text model.\n\nTo read more about the release, see: https://www.minimaxi.com/en/news/minimax-01-series-2", + "description": "MiniMax-01 combines MiniMax-Text-01 for text generation and MiniMax-VL-01 for image understanding.", "context_length": 1000192, - "architecture": { - "modality": "text+image-\u003Etext", - "input_modalities": ["text", "image"], - "output_modalities": ["text"], - "tokenizer": "Other", - "instruct_type": null - }, - "pricing": { - "prompt": "0.0000002", - "completion": "0.0000011", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 1000192, - "max_completion_tokens": 1000192, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": ["max_tokens", "temperature", "top_p"], - "default_parameters": {} + "hugging_face_id": "MiniMaxAI/MiniMax-Text-01" }, { "id": "mistralai/codestral-2501", - "canonical_slug": "mistralai/codestral-2501", - "hugging_face_id": "", "name": "Mistral: Codestral 2501", - "created": 1736895522, - "description": "[Mistral](/mistralai)'s cutting-edge language model for coding. Codestral specializes in low-latency, high-frequency tasks such as fill-in-the-middle (FIM), code correction and test generation.
\n\nLearn more on their blog post: https://mistral.ai/news/codestral-2501/", + "description": "[Mistral](/mistralai)'s cutting-edge language model for coding.", "context_length": 256000, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Mistral", - "instruct_type": null - }, - "pricing": { - "prompt": "0.0000003", - "completion": "0.0000009", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 256000, - "max_completion_tokens": null, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "max_tokens", - "presence_penalty", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_p" - ], - "default_parameters": { - "temperature": 0.3 - } + "hugging_face_id": "" }, { "id": "microsoft/phi-4", - "canonical_slug": "microsoft/phi-4", - "hugging_face_id": "microsoft/phi-4", "name": "Microsoft: Phi 4", - "created": 1736489872, - "description": "[Microsoft Research](/microsoft) Phi-4 is designed to perform well in complex reasoning tasks and can operate efficiently in situations with limited memory or where quick responses are needed. \n\nAt 14 billion parameters, it was trained on a mix of high-quality synthetic datasets, data from curated websites, and academic materials. It has undergone careful improvement to follow instructions accurately and maintain strong safety standards. It works best with English language inputs.\n\nFor more information, please see [Phi-4 Technical Report](https://arxiv.org/pdf/2412.08905)\n", + "description": "[Microsoft Research](/microsoft) Phi-4 is designed to perform well in complex reasoning tasks and can operate efficiently in situations with limited memory or where quick responses are needed.", "context_length": 16384, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Other", - "instruct_type": null - }, - "pricing": { - "prompt": "0.00000006", - "completion": "0.00000014", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 16384, - "max_completion_tokens": null, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "max_tokens", - "min_p", - "presence_penalty", - "repetition_penalty", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "top_k", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "microsoft/phi-4" }, { "id": "sao10k/l3.1-70b-hanami-x1", - "canonical_slug": "sao10k/l3.1-70b-hanami-x1", - "hugging_face_id": "Sao10K/L3.1-70B-Hanami-x1", "name": "Sao10K: Llama 3.1 70B Hanami x1", - "created": 1736302854, "description": "This is [Sao10K](/sao10k)'s experiment over [Euryale v2.2](/sao10k/l3.1-euryale-70b).", "context_length": 16000, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Llama3", - "instruct_type": null - }, - "pricing": { - "prompt": "0.000003", - "completion": "0.000003", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 16000, - "max_completion_tokens": null, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - 
"frequency_penalty", - "logit_bias", - "max_tokens", - "min_p", - "presence_penalty", - "repetition_penalty", - "seed", - "stop", - "temperature", - "top_k", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "Sao10K/L3.1-70B-Hanami-x1" }, { "id": "deepseek/deepseek-chat", - "canonical_slug": "deepseek/deepseek-chat-v3", - "hugging_face_id": "deepseek-ai/DeepSeek-V3", "name": "DeepSeek: DeepSeek V3", - "created": 1735241320, - "description": "DeepSeek-V3 is the latest model from the DeepSeek team, building upon the instruction following and coding abilities of the previous versions. Pre-trained on nearly 15 trillion tokens, the reported evaluations reveal that the model outperforms other open-source models and rivals leading closed-source models.\n\nFor model details, please visit [the DeepSeek-V3 repo](https://github.com/deepseek-ai/DeepSeek-V3) for more information, or see the [launch announcement](https://api-docs.deepseek.com/news/news1226).", + "description": "DeepSeek-V3 is the latest model from the DeepSeek team, building upon the instruction following and coding abilities of the previous versions.", "context_length": 163840, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "DeepSeek", - "instruct_type": null - }, - "pricing": { - "prompt": "0.0000003", - "completion": "0.0000012", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 163840, - "max_completion_tokens": 163840, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "max_tokens", - "min_p", - "presence_penalty", - "repetition_penalty", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_k", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "deepseek-ai/DeepSeek-V3" }, { "id": "sao10k/l3.3-euryale-70b", - "canonical_slug": "sao10k/l3.3-euryale-70b-v2.3", - "hugging_face_id": "Sao10K/L3.3-70B-Euryale-v2.3", "name": "Sao10K: Llama 3.3 Euryale 70B", - "created": 1734535928, - "description": "Euryale L3.3 70B is a model focused on creative roleplay from [Sao10k](https://ko-fi.com/sao10k). 
It is the successor of [Euryale L3 70B v2.2](/models/sao10k/l3-euryale-70b).", + "description": "Euryale L3.3 70B is a model focused on creative roleplay from [Sao10k](https://ko-fi.com/sao10k).", "context_length": 131072, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Llama3", - "instruct_type": "llama3" - }, - "pricing": { - "prompt": "0.00000065", - "completion": "0.00000075", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 131072, - "max_completion_tokens": 16384, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "max_tokens", - "min_p", - "presence_penalty", - "repetition_penalty", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "top_k", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "Sao10K/L3.3-70B-Euryale-v2.3" }, { "id": "openai/o1", - "canonical_slug": "openai/o1-2024-12-17", - "hugging_face_id": "", "name": "OpenAI: o1", - "created": 1734459999, - "description": "The latest and strongest model family from OpenAI, o1 is designed to spend more time thinking before responding. The o1 model series is trained with large-scale reinforcement learning to reason using chain of thought. \n\nThe o1 models are optimized for math, science, programming, and other STEM-related tasks. They consistently exhibit PhD-level accuracy on benchmarks in physics, chemistry, and biology. Learn more in the [launch announcement](https://openai.com/o1).\n", + "description": "The latest and strongest model family from OpenAI, o1 is designed to spend more time thinking before responding.", "context_length": 200000, - "architecture": { - "modality": "text+image-\u003Etext", - "input_modalities": ["text", "image", "file"], - "output_modalities": ["text"], - "tokenizer": "GPT", - "instruct_type": null - }, - "pricing": { - "prompt": "0.000015", - "completion": "0.00006", - "request": "0", - "image": "0.021675", - "web_search": "0", - "internal_reasoning": "0", - "input_cache_read": "0.0000075" - }, - "top_provider": { - "context_length": 200000, - "max_completion_tokens": 100000, - "is_moderated": true - }, - "per_request_limits": null, - "supported_parameters": [ - "max_tokens", - "response_format", - "seed", - "structured_outputs", - "tool_choice", - "tools" - ], - "default_parameters": {} + "hugging_face_id": "" }, { "id": "cohere/command-r7b-12-2024", - "canonical_slug": "cohere/command-r7b-12-2024", - "hugging_face_id": "", "name": "Cohere: Command R7B (12-2024)", - "created": 1734158152, - "description": "Command R7B (12-2024) is a small, fast update of the Command R+ model, delivered in December 2024. 
It excels at RAG, tool use, agents, and similar tasks requiring complex reasoning and multiple steps.\n\nUse of this model is subject to Cohere's [Usage Policy](https://docs.cohere.com/docs/usage-policy) and [SaaS Agreement](https://cohere.com/saas-agreement).", + "description": "Command R7B (12-2024) is a small, fast update of the Command R+ model, delivered in December 2024.", "context_length": 128000, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Cohere", - "instruct_type": null - }, - "pricing": { - "prompt": "0.0000000375", - "completion": "0.00000015", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 128000, - "max_completion_tokens": 4000, - "is_moderated": true - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "max_tokens", - "presence_penalty", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "top_k", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "" }, { "id": "google/gemini-2.0-flash-exp:free", - "canonical_slug": "google/gemini-2.0-flash-exp", - "hugging_face_id": "", "name": "Google: Gemini 2.0 Flash Experimental (free)", - "created": 1733937523, - "description": "Gemini Flash 2.0 offers a significantly faster time to first token (TTFT) compared to [Gemini Flash 1.5](/google/gemini-flash-1.5), while maintaining quality on par with larger models like [Gemini Pro 1.5](/google/gemini-pro-1.5). It introduces notable enhancements in multimodal understanding, coding capabilities, complex instruction following, and function calling. These advancements come together to deliver more seamless and robust agentic experiences.", + "description": "Gemini Flash 2.0 offers a significantly faster time to first token (TTFT) compared to [Gemini Flash 1.5](/google/gemini-flash-1.5), while maintaining quality on par with larger models like [Gemini Pro 1.5](/google/gemini-pro-1.5).", "context_length": 1048576, - "architecture": { - "modality": "text+image-\u003Etext", - "input_modalities": ["text", "image"], - "output_modalities": ["text"], - "tokenizer": "Gemini", - "instruct_type": null - }, - "pricing": { - "prompt": "0", - "completion": "0", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 1048576, - "max_completion_tokens": 8192, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "max_tokens", - "response_format", - "seed", - "stop", - "temperature", - "tool_choice", - "tools", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "" }, { "id": "meta-llama/llama-3.3-70b-instruct:free", - "canonical_slug": "meta-llama/llama-3.3-70b-instruct", - "hugging_face_id": "meta-llama/Llama-3.3-70B-Instruct", "name": "Meta: Llama 3.3 70B Instruct (free)", - "created": 1733506137, - "description": "The Meta Llama 3.3 multilingual large language model (LLM) is a pretrained and instruction tuned generative model in 70B (text in/text out). 
The Llama 3.3 instruction tuned text only model is optimized for multilingual dialogue use cases and outperforms many of the available open source and closed chat models on common industry benchmarks.\n\nSupported languages: English, German, French, Italian, Portuguese, Hindi, Spanish, and Thai.\n\n[Model Card](https://github.com/meta-llama/llama-models/blob/main/models/llama3_3/MODEL_CARD.md)", + "description": "The Meta Llama 3.3 multilingual large language model (LLM) is a pretrained and instruction tuned generative model in 70B (text in/text out).", "context_length": 131072, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Llama3", - "instruct_type": "llama3" - }, - "pricing": { - "prompt": "0", - "completion": "0", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 131072, - "max_completion_tokens": null, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "logit_bias", - "max_tokens", - "min_p", - "presence_penalty", - "repetition_penalty", - "stop", - "temperature", - "tool_choice", - "tools", - "top_k", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "meta-llama/Llama-3.3-70B-Instruct" }, { "id": "meta-llama/llama-3.3-70b-instruct", - "canonical_slug": "meta-llama/llama-3.3-70b-instruct", - "hugging_face_id": "meta-llama/Llama-3.3-70B-Instruct", "name": "Meta: Llama 3.3 70B Instruct", - "created": 1733506137, - "description": "The Meta Llama 3.3 multilingual large language model (LLM) is a pretrained and instruction tuned generative model in 70B (text in/text out). The Llama 3.3 instruction tuned text only model is optimized for multilingual dialogue use cases and outperforms many of the available open source and closed chat models on common industry benchmarks.\n\nSupported languages: English, German, French, Italian, Portuguese, Hindi, Spanish, and Thai.\n\n[Model Card](https://github.com/meta-llama/llama-models/blob/main/models/llama3_3/MODEL_CARD.md)", + "description": "The Meta Llama 3.3 multilingual large language model (LLM) is a pretrained and instruction tuned generative model in 70B (text in/text out).", "context_length": 131072, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Llama3", - "instruct_type": "llama3" - }, - "pricing": { - "prompt": "0.00000013", - "completion": "0.00000038", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 131072, - "max_completion_tokens": 16384, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "logit_bias", - "logprobs", - "max_tokens", - "min_p", - "presence_penalty", - "repetition_penalty", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_k", - "top_logprobs", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "meta-llama/Llama-3.3-70B-Instruct" }, { "id": "amazon/nova-lite-v1", - "canonical_slug": "amazon/nova-lite-v1", - "hugging_face_id": "", "name": "Amazon: Nova Lite 1.0", - "created": 1733437363, - "description": "Amazon Nova Lite 1.0 is a very low-cost multimodal model from Amazon that focused on fast processing of image, video, and text inputs to generate text output. 
Amazon Nova Lite can handle real-time customer interactions, document analysis, and visual question-answering tasks with high accuracy.\n\nWith an input context of 300K tokens, it can analyze multiple images or up to 30 minutes of video in a single input.", + "description": "Amazon Nova Lite 1.0 is a very low-cost multimodal model from Amazon that focuses on fast processing of image, video, and text inputs to generate text output.", "context_length": 300000, - "architecture": { - "modality": "text+image-\u003Etext", - "input_modalities": ["text", "image"], - "output_modalities": ["text"], - "tokenizer": "Nova", - "instruct_type": null - }, - "pricing": { - "prompt": "0.00000006", - "completion": "0.00000024", - "request": "0", - "image": "0.00009", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 300000, - "max_completion_tokens": 5120, - "is_moderated": true - }, - "per_request_limits": null, - "supported_parameters": [ - "max_tokens", - "stop", - "temperature", - "tools", - "top_k", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "" }, { "id": "amazon/nova-micro-v1", - "canonical_slug": "amazon/nova-micro-v1", - "hugging_face_id": "", "name": "Amazon: Nova Micro 1.0", - "created": 1733437237, - "description": "Amazon Nova Micro 1.0 is a text-only model that delivers the lowest latency responses in the Amazon Nova family of models at a very low cost. With a context length of 128K tokens and optimized for speed and cost, Amazon Nova Micro excels at tasks such as text summarization, translation, content classification, interactive chat, and brainstorming. It has simple mathematical reasoning and coding abilities.", + "description": "Amazon Nova Micro 1.0 is a text-only model that delivers the lowest latency responses in the Amazon Nova family of models at a very low cost.", "context_length": 128000, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Nova", - "instruct_type": null - }, - "pricing": { - "prompt": "0.000000035", - "completion": "0.00000014", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 128000, - "max_completion_tokens": 5120, - "is_moderated": true - }, - "per_request_limits": null, - "supported_parameters": [ - "max_tokens", - "stop", - "temperature", - "tools", - "top_k", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "" }, { "id": "amazon/nova-pro-v1", - "canonical_slug": "amazon/nova-pro-v1", - "hugging_face_id": "", "name": "Amazon: Nova Pro 1.0", - "created": 1733436303, - "description": "Amazon Nova Pro 1.0 is a capable multimodal model from Amazon focused on providing a combination of accuracy, speed, and cost for a wide range of tasks. 
As of December 2024, it achieves state-of-the-art performance on key benchmarks including visual question answering (TextVQA) and video understanding (VATEX).\n\nAmazon Nova Pro demonstrates strong capabilities in processing both visual and textual information and at analyzing financial documents.\n\n**NOTE**: Video input is not supported at this time.", + "description": "Amazon Nova Pro 1.0 is a capable multimodal model from Amazon focused on providing a combination of accuracy, speed, and cost for a wide range of tasks.", "context_length": 300000, - "architecture": { - "modality": "text+image-\u003Etext", - "input_modalities": ["text", "image"], - "output_modalities": ["text"], - "tokenizer": "Nova", - "instruct_type": null - }, - "pricing": { - "prompt": "0.0000008", - "completion": "0.0000032", - "request": "0", - "image": "0.0012", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 300000, - "max_completion_tokens": 5120, - "is_moderated": true - }, - "per_request_limits": null, - "supported_parameters": [ - "max_tokens", - "stop", - "temperature", - "tools", - "top_k", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "" }, { "id": "openai/gpt-4o-2024-11-20", - "canonical_slug": "openai/gpt-4o-2024-11-20", - "hugging_face_id": "", "name": "OpenAI: GPT-4o (2024-11-20)", - "created": 1732127594, - "description": "The 2024-11-20 version of GPT-4o offers a leveled-up creative writing ability with more natural, engaging, and tailored writing to improve relevance & readability. It’s also better at working with uploaded files, providing deeper insights & more thorough responses.\n\nGPT-4o (\"o\" for \"omni\") is OpenAI's latest AI model, supporting both text and image inputs with text outputs. It maintains the intelligence level of [GPT-4 Turbo](/models/openai/gpt-4-turbo) while being twice as fast and 50% more cost-effective. 
GPT-4o also offers improved performance in processing non-English languages and enhanced visual capabilities.", + "description": "The 2024-11-20 version of GPT-4o offers a leveled-up creative writing ability with more natural, engaging, and tailored writing to improve relevance & readability.", "context_length": 128000, - "architecture": { - "modality": "text+image-\u003Etext", - "input_modalities": ["text", "image", "file"], - "output_modalities": ["text"], - "tokenizer": "GPT", - "instruct_type": null - }, - "pricing": { - "prompt": "0.0000025", - "completion": "0.00001", - "request": "0", - "image": "0.003613", - "web_search": "0", - "internal_reasoning": "0", - "input_cache_read": "0.00000125" - }, - "top_provider": { - "context_length": 128000, - "max_completion_tokens": 16384, - "is_moderated": true - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "logit_bias", - "logprobs", - "max_tokens", - "presence_penalty", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_logprobs", - "top_p", - "web_search_options" - ], - "default_parameters": {} + "hugging_face_id": "" }, { "id": "mistralai/mistral-large-2411", - "canonical_slug": "mistralai/mistral-large-2411", - "hugging_face_id": "", "name": "Mistral Large 2411", - "created": 1731978685, - "description": "Mistral Large 2 2411 is an update of [Mistral Large 2](/mistralai/mistral-large) released together with [Pixtral Large 2411](/mistralai/pixtral-large-2411)\n\nIt provides a significant upgrade on the previous [Mistral Large 24.07](/mistralai/mistral-large-2407), with notable improvements in long context understanding, a new system prompt, and more accurate function calling.", + "description": "Mistral Large 2 2411 is an update of [Mistral Large 2](/mistralai/mistral-large) released together with [Pixtral Large 2411](/mistralai/pixtral-large-2411)\n\nIt provides a significant upgrade on the previous [Mistral Large 24.07](/mistralai/mistral-large-2407).", "context_length": 131072, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Mistral", - "instruct_type": null - }, - "pricing": { - "prompt": "0.000002", - "completion": "0.000006", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 131072, - "max_completion_tokens": null, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "max_tokens", - "presence_penalty", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_p" - ], - "default_parameters": { - "temperature": 0.3 - } + "hugging_face_id": "" }, { "id": "mistralai/mistral-large-2407", - "canonical_slug": "mistralai/mistral-large-2407", - "hugging_face_id": "", "name": "Mistral Large 2407", - "created": 1731978415, - "description": "This is Mistral AI's flagship model, Mistral Large 2 (version mistral-large-2407). It's a proprietary weights-available model and excels at reasoning, code, JSON, chat, and more. Read the launch announcement [here](https://mistral.ai/news/mistral-large-2407/).\n\nIt supports dozens of languages including French, German, Spanish, Italian, Portuguese, Arabic, Hindi, Russian, Chinese, Japanese, and Korean, along with 80+ coding languages including Python, Java, C, C++, JavaScript, and Bash. 
Its long context window allows precise information recall from large documents.\n", + "description": "This is Mistral AI's flagship model, Mistral Large 2 (version mistral-large-2407).", "context_length": 131072, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Mistral", - "instruct_type": null - }, - "pricing": { - "prompt": "0.000002", - "completion": "0.000006", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 131072, - "max_completion_tokens": null, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "max_tokens", - "presence_penalty", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_p" - ], - "default_parameters": { - "temperature": 0.3 - } + "hugging_face_id": "" }, { "id": "mistralai/pixtral-large-2411", - "canonical_slug": "mistralai/pixtral-large-2411", - "hugging_face_id": "", "name": "Mistral: Pixtral Large 2411", - "created": 1731977388, - "description": "Pixtral Large is a 124B parameter, open-weight, multimodal model built on top of [Mistral Large 2](/mistralai/mistral-large-2411). The model is able to understand documents, charts and natural images.\n\nThe model is available under the Mistral Research License (MRL) for research and educational use, and the Mistral Commercial License for experimentation, testing, and production for commercial purposes.\n\n", + "description": "Pixtral Large is a 124B parameter, open-weight, multimodal model built on top of [Mistral Large 2](/mistralai/mistral-large-2411).", "context_length": 131072, - "architecture": { - "modality": "text+image-\u003Etext", - "input_modalities": ["text", "image"], - "output_modalities": ["text"], - "tokenizer": "Mistral", - "instruct_type": null - }, - "pricing": { - "prompt": "0.000002", - "completion": "0.000006", - "request": "0", - "image": "0.002888", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 131072, - "max_completion_tokens": null, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "max_tokens", - "presence_penalty", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_p" - ], - "default_parameters": { - "temperature": 0.3 - } + "hugging_face_id": "" }, { "id": "qwen/qwen-2.5-coder-32b-instruct:free", - "canonical_slug": "qwen/qwen-2.5-coder-32b-instruct", - "hugging_face_id": "Qwen/Qwen2.5-Coder-32B-Instruct", "name": "Qwen2.5 Coder 32B Instruct (free)", - "created": 1731368400, - "description": "Qwen2.5-Coder is the latest series of Code-Specific Qwen large language models (formerly known as CodeQwen). Qwen2.5-Coder brings the following improvements upon CodeQwen1.5:\n\n- Significantly improvements in **code generation**, **code reasoning** and **code fixing**. \n- A more comprehensive foundation for real-world applications such as **Code Agents**. 
Not only enhancing coding capabilities but also maintaining its strengths in mathematics and general competencies.\n\nTo read more about its evaluation results, check out [Qwen 2.5 Coder's blog](https://qwenlm.github.io/blog/qwen2.5-coder-family/).", + "description": "Qwen2.5-Coder is the latest series of Code-Specific Qwen large language models (formerly known as CodeQwen).", "context_length": 32768, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Qwen", - "instruct_type": "chatml" - }, - "pricing": { - "prompt": "0", - "completion": "0", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 32768, - "max_completion_tokens": null, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "max_tokens", - "presence_penalty", - "repetition_penalty", - "seed", - "stop", - "temperature", - "top_k", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "Qwen/Qwen2.5-Coder-32B-Instruct" }, { "id": "qwen/qwen-2.5-coder-32b-instruct", - "canonical_slug": "qwen/qwen-2.5-coder-32b-instruct", - "hugging_face_id": "Qwen/Qwen2.5-Coder-32B-Instruct", "name": "Qwen2.5 Coder 32B Instruct", - "created": 1731368400, - "description": "Qwen2.5-Coder is the latest series of Code-Specific Qwen large language models (formerly known as CodeQwen). Qwen2.5-Coder brings the following improvements upon CodeQwen1.5:\n\n- Significantly improvements in **code generation**, **code reasoning** and **code fixing**. \n- A more comprehensive foundation for real-world applications such as **Code Agents**. Not only enhancing coding capabilities but also maintaining its strengths in mathematics and general competencies.\n\nTo read more about its evaluation results, check out [Qwen 2.5 Coder's blog](https://qwenlm.github.io/blog/qwen2.5-coder-family/).", + "description": "Qwen2.5-Coder is the latest series of Code-Specific Qwen large language models (formerly known as CodeQwen).", "context_length": 32768, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Qwen", - "instruct_type": "chatml" - }, - "pricing": { - "prompt": "0.00000004", - "completion": "0.00000016", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 32768, - "max_completion_tokens": 32768, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "logit_bias", - "max_tokens", - "min_p", - "presence_penalty", - "repetition_penalty", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "top_k", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "Qwen/Qwen2.5-Coder-32B-Instruct" }, { "id": "raifle/sorcererlm-8x22b", - "canonical_slug": "raifle/sorcererlm-8x22b", - "hugging_face_id": "rAIfle/SorcererLM-8x22b-bf16", "name": "SorcererLM 8x22B", - "created": 1731105083, - "description": "SorcererLM is an advanced RP and storytelling model, built as a Low-rank 16-bit LoRA fine-tuned on [WizardLM-2 8x22B](/microsoft/wizardlm-2-8x22b).\n\n- Advanced reasoning and emotional intelligence for engaging and immersive interactions\n- Vivid writing capabilities enriched with spatial and contextual awareness\n- Enhanced narrative depth, promoting creative and dynamic storytelling", + "description": "SorcererLM 
is an advanced RP and storytelling model, built as a Low-rank 16-bit LoRA fine-tuned on [WizardLM-2 8x22B](/microsoft/wizardlm-2-8x22b).", "context_length": 16000, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Mistral", - "instruct_type": "vicuna" - }, - "pricing": { - "prompt": "0.0000045", - "completion": "0.0000045", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 16000, - "max_completion_tokens": null, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "logit_bias", - "max_tokens", - "min_p", - "presence_penalty", - "repetition_penalty", - "seed", - "stop", - "temperature", - "top_k", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "rAIfle/SorcererLM-8x22b-bf16" }, { "id": "thedrummer/unslopnemo-12b", - "canonical_slug": "thedrummer/unslopnemo-12b", - "hugging_face_id": "TheDrummer/UnslopNemo-12B-v4.1", "name": "TheDrummer: UnslopNemo 12B", - "created": 1731103448, "description": "UnslopNemo v4.1 is the latest addition from the creator of Rocinante, designed for adventure writing and role-play scenarios.", "context_length": 32768, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Mistral", - "instruct_type": "mistral" - }, - "pricing": { - "prompt": "0.0000004", - "completion": "0.0000004", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 32768, - "max_completion_tokens": null, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "max_tokens", - "presence_penalty", - "response_format", - "stop", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "TheDrummer/UnslopNemo-12B-v4.1" }, { "id": "anthropic/claude-3.5-haiku", - "canonical_slug": "anthropic/claude-3-5-haiku", - "hugging_face_id": null, "name": "Anthropic: Claude 3.5 Haiku", - "created": 1730678400, - "description": "Claude 3.5 Haiku features offers enhanced capabilities in speed, coding accuracy, and tool use. 
Engineered to excel in real-time applications, it delivers quick response times that are essential for dynamic tasks such as chat interactions and immediate coding suggestions.\n\nThis makes it highly suitable for environments that demand both speed and precision, such as software development, customer service bots, and data management systems.\n\nThis model is currently pointing to [Claude 3.5 Haiku (2024-10-22)](/anthropic/claude-3-5-haiku-20241022).", + "description": "Claude 3.5 Haiku offers enhanced capabilities in speed, coding accuracy, and tool use.", "context_length": 200000, - "architecture": { - "modality": "text+image-\u003Etext", - "input_modalities": ["text", "image"], - "output_modalities": ["text"], - "tokenizer": "Claude", - "instruct_type": null - }, - "pricing": { - "prompt": "0.0000008", - "completion": "0.000004", - "request": "0", - "image": "0", - "web_search": "0.01", - "internal_reasoning": "0", - "input_cache_read": "0.00000008", - "input_cache_write": "0.000001" - }, - "top_provider": { - "context_length": 200000, - "max_completion_tokens": 8192, - "is_moderated": true - }, - "per_request_limits": null, - "supported_parameters": [ - "max_tokens", - "stop", - "temperature", - "tool_choice", - "tools", - "top_k", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "" }, { "id": "anthropic/claude-3.5-haiku-20241022", - "canonical_slug": "anthropic/claude-3-5-haiku-20241022", - "hugging_face_id": null, "name": "Anthropic: Claude 3.5 Haiku (2024-10-22)", - "created": 1730678400, - "description": "Claude 3.5 Haiku features enhancements across all skill sets including coding, tool use, and reasoning. As the fastest model in the Anthropic lineup, it offers rapid response times suitable for applications that require high interactivity and low latency, such as user-facing chatbots and on-the-fly code completions. 
It also excels in specialized tasks like data extraction and real-time content moderation, making it a versatile tool for a broad range of industries.\n\nIt does not support image inputs.\n\nSee the launch announcement and benchmark results [here](https://www.anthropic.com/news/3-5-models-and-computer-use)", + "description": "Claude 3.5 Haiku features enhancements across all skill sets including coding, tool use, and reasoning.", "context_length": 200000, - "architecture": { - "modality": "text+image-\u003Etext", - "input_modalities": ["text", "image", "file"], - "output_modalities": ["text"], - "tokenizer": "Claude", - "instruct_type": null - }, - "pricing": { - "prompt": "0.0000008", - "completion": "0.000004", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0", - "input_cache_read": "0.00000008", - "input_cache_write": "0.000001" - }, - "top_provider": { - "context_length": 200000, - "max_completion_tokens": 8192, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "max_tokens", - "stop", - "temperature", - "tool_choice", - "tools", - "top_k", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "" }, { "id": "anthracite-org/magnum-v4-72b", - "canonical_slug": "anthracite-org/magnum-v4-72b", - "hugging_face_id": "anthracite-org/magnum-v4-72b", "name": "Magnum v4 72B", - "created": 1729555200, - "description": "This is a series of models designed to replicate the prose quality of the Claude 3 models, specifically Sonnet(https://openrouter.ai/anthropic/claude-3.5-sonnet) and Opus(https://openrouter.ai/anthropic/claude-3-opus).\n\nThe model is fine-tuned on top of [Qwen2.5 72B](https://openrouter.ai/qwen/qwen-2.5-72b-instruct).", + "description": "This is a series of models designed to replicate the prose quality of the Claude 3 models, specifically Sonnet(https://openrouter.ai/anthropic/claude-3.5-sonnet) and Opus(https://openrouter.ai/anthropic/claude-3-opus).", "context_length": 16384, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Qwen", - "instruct_type": "chatml" - }, - "pricing": { - "prompt": "0.000003", - "completion": "0.000005", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 16384, - "max_completion_tokens": 2048, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "logit_bias", - "logprobs", - "max_tokens", - "min_p", - "presence_penalty", - "repetition_penalty", - "response_format", - "seed", - "stop", - "temperature", - "top_a", - "top_k", - "top_logprobs", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "anthracite-org/magnum-v4-72b" }, { "id": "anthropic/claude-3.5-sonnet", - "canonical_slug": "anthropic/claude-3.5-sonnet", - "hugging_face_id": null, "name": "Anthropic: Claude 3.5 Sonnet", - "created": 1729555200, - "description": "New Claude 3.5 Sonnet delivers better-than-Opus capabilities, faster-than-Sonnet speeds, at the same Sonnet prices. 
Sonnet is particularly good at:\n\n- Coding: Scores ~49% on SWE-Bench Verified, higher than the last best score, and without any fancy prompt scaffolding\n- Data science: Augments human data science expertise; navigates unstructured data while using multiple tools for insights\n- Visual processing: excelling at interpreting charts, graphs, and images, accurately transcribing text to derive insights beyond just the text alone\n- Agentic tasks: exceptional tool use, making it great at agentic tasks (i.e. complex, multi-step problem solving tasks that require engaging with other systems)\n\n#multimodal", + "description": "New Claude 3.5 Sonnet delivers better-than-Opus capabilities, faster-than-Sonnet speeds, at the same Sonnet prices.", "context_length": 200000, - "architecture": { - "modality": "text+image-\u003Etext", - "input_modalities": ["text", "image", "file"], - "output_modalities": ["text"], - "tokenizer": "Claude", - "instruct_type": null - }, - "pricing": { - "prompt": "0.000003", - "completion": "0.000015", - "request": "0", - "image": "0.0048", - "web_search": "0", - "internal_reasoning": "0", - "input_cache_read": "0.0000003", - "input_cache_write": "0.00000375" - }, - "top_provider": { - "context_length": 200000, - "max_completion_tokens": 8192, - "is_moderated": true - }, - "per_request_limits": null, - "supported_parameters": [ - "max_tokens", - "stop", - "temperature", - "tool_choice", - "tools", - "top_k", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "" }, { "id": "mistralai/ministral-3b", - "canonical_slug": "mistralai/ministral-3b", - "hugging_face_id": null, "name": "Mistral: Ministral 3B", - "created": 1729123200, - "description": "Ministral 3B is a 3B parameter model optimized for on-device and edge computing. It excels in knowledge, commonsense reasoning, and function-calling, outperforming larger models like Mistral 7B on most benchmarks. Supporting up to 128k context length, it’s ideal for orchestrating agentic workflows and specialist tasks with efficient inference.", + "description": "Ministral 3B is a 3B parameter model optimized for on-device and edge computing.", "context_length": 131072, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Mistral", - "instruct_type": null - }, - "pricing": { - "prompt": "0.00000004", - "completion": "0.00000004", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 131072, - "max_completion_tokens": null, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "max_tokens", - "presence_penalty", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_p" - ], - "default_parameters": { - "temperature": 0.3 - } + "hugging_face_id": "" }, { "id": "mistralai/ministral-8b", - "canonical_slug": "mistralai/ministral-8b", - "hugging_face_id": null, "name": "Mistral: Ministral 8B", - "created": 1729123200, - "description": "Ministral 8B is an 8B parameter model featuring a unique interleaved sliding-window attention pattern for faster, memory-efficient inference. Designed for edge use cases, it supports up to 128k context length and excels in knowledge and reasoning tasks. 
It outperforms peers in the sub-10B category, making it perfect for low-latency, privacy-first applications.", + "description": "Ministral 8B is an 8B parameter model featuring a unique interleaved sliding-window attention pattern for faster, memory-efficient inference.", "context_length": 131072, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Mistral", - "instruct_type": null - }, - "pricing": { - "prompt": "0.0000001", - "completion": "0.0000001", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 131072, - "max_completion_tokens": null, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "max_tokens", - "presence_penalty", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_p" - ], - "default_parameters": { - "temperature": 0.3 - } + "hugging_face_id": "" }, { "id": "qwen/qwen-2.5-7b-instruct", - "canonical_slug": "qwen/qwen-2.5-7b-instruct", - "hugging_face_id": "Qwen/Qwen2.5-7B-Instruct", "name": "Qwen: Qwen2.5 7B Instruct", - "created": 1729036800, - "description": "Qwen2.5 7B is the latest series of Qwen large language models. Qwen2.5 brings the following improvements upon Qwen2:\n\n- Significantly more knowledge and has greatly improved capabilities in coding and mathematics, thanks to our specialized expert models in these domains.\n\n- Significant improvements in instruction following, generating long texts (over 8K tokens), understanding structured data (e.g, tables), and generating structured outputs especially JSON. More resilient to the diversity of system prompts, enhancing role-play implementation and condition-setting for chatbots.\n\n- Long-context Support up to 128K tokens and can generate up to 8K tokens.\n\n- Multilingual support for over 29 languages, including Chinese, English, French, Spanish, Portuguese, German, Italian, Russian, Japanese, Korean, Vietnamese, Thai, Arabic, and more.\n\nUsage of this model is subject to [Tongyi Qianwen LICENSE AGREEMENT](https://huggingface.co/Qwen/Qwen1.5-110B-Chat/blob/main/LICENSE).", + "description": "Qwen2.5 7B is the latest series of Qwen large language models.", "context_length": 32768, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Qwen", - "instruct_type": "chatml" - }, - "pricing": { - "prompt": "0.00000004", - "completion": "0.0000001", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 32768, - "max_completion_tokens": null, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "logit_bias", - "max_tokens", - "min_p", - "presence_penalty", - "repetition_penalty", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_k", - "top_p" - ], - "default_parameters": { - "temperature": null, - "top_p": null, - "frequency_penalty": null - } + "hugging_face_id": "Qwen/Qwen2.5-7B-Instruct" }, { "id": "nvidia/llama-3.1-nemotron-70b-instruct", - "canonical_slug": "nvidia/llama-3.1-nemotron-70b-instruct", - "hugging_face_id": "nvidia/Llama-3.1-Nemotron-70B-Instruct-HF", "name": "NVIDIA: Llama 3.1 Nemotron 70B Instruct", - "created": 1728950400, - "description": "NVIDIA's 
Llama 3.1 Nemotron 70B is a language model designed for generating precise and useful responses. Leveraging [Llama 3.1 70B](/models/meta-llama/llama-3.1-70b-instruct) architecture and Reinforcement Learning from Human Feedback (RLHF), it excels in automatic alignment benchmarks. This model is tailored for applications requiring high accuracy in helpfulness and response generation, suitable for diverse user queries across multiple domains.\n\nUsage of this model is subject to [Meta's Acceptable Use Policy](https://www.llama.com/llama3/use-policy/).", + "description": "NVIDIA's Llama 3.1 Nemotron 70B is a language model designed for generating precise and useful responses.", "context_length": 131072, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Llama3", - "instruct_type": "llama3" - }, - "pricing": { - "prompt": "0.0000006", - "completion": "0.0000006", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 131072, - "max_completion_tokens": 16384, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "max_tokens", - "min_p", - "presence_penalty", - "repetition_penalty", - "response_format", - "seed", - "stop", - "temperature", - "tool_choice", - "tools", - "top_k", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "nvidia/Llama-3.1-Nemotron-70B-Instruct-HF" }, { "id": "inflection/inflection-3-productivity", - "canonical_slug": "inflection/inflection-3-productivity", - "hugging_face_id": null, "name": "Inflection: Inflection 3 Productivity", - "created": 1728604800, - "description": "Inflection 3 Productivity is optimized for following instructions. It is better for tasks requiring JSON output or precise adherence to provided guidelines. It has access to recent news.\n\nFor emotional intelligence similar to Pi, see [Inflect 3 Pi](/inflection/inflection-3-pi)\n\nSee [Inflection's announcement](https://inflection.ai/blog/enterprise) for more details.", + "description": "Inflection 3 Productivity is optimized for following instructions.", "context_length": 8000, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Other", - "instruct_type": null - }, - "pricing": { - "prompt": "0.0000025", - "completion": "0.00001", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 8000, - "max_completion_tokens": 1024, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": ["max_tokens", "stop", "temperature", "top_p"], - "default_parameters": {} + "hugging_face_id": "" }, { "id": "inflection/inflection-3-pi", - "canonical_slug": "inflection/inflection-3-pi", - "hugging_face_id": null, "name": "Inflection: Inflection 3 Pi", - "created": 1728604800, - "description": "Inflection 3 Pi powers Inflection's [Pi](https://pi.ai) chatbot, including backstory, emotional intelligence, productivity, and safety. It has access to recent news, and excels in scenarios like customer support and roleplay.\n\nPi has been trained to mirror your tone and style, if you use more emojis, so will Pi! 
Try experimenting with various prompts and conversation styles.", + "description": "Inflection 3 Pi powers Inflection's [Pi](https://pi.ai) chatbot, including backstory, emotional intelligence, productivity, and safety.", "context_length": 8000, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Other", - "instruct_type": null - }, - "pricing": { - "prompt": "0.0000025", - "completion": "0.00001", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 8000, - "max_completion_tokens": 1024, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": ["max_tokens", "stop", "temperature", "top_p"], - "default_parameters": {} + "hugging_face_id": "" }, { "id": "thedrummer/rocinante-12b", - "canonical_slug": "thedrummer/rocinante-12b", - "hugging_face_id": "TheDrummer/Rocinante-12B-v1.1", "name": "TheDrummer: Rocinante 12B", - "created": 1727654400, - "description": "Rocinante 12B is designed for engaging storytelling and rich prose.\n\nEarly testers have reported:\n- Expanded vocabulary with unique and expressive word choices\n- Enhanced creativity for vivid narratives\n- Adventure-filled and captivating stories", + "description": "Rocinante 12B is designed for engaging storytelling and rich prose.", "context_length": 32768, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Qwen", - "instruct_type": "chatml" - }, - "pricing": { - "prompt": "0.00000017", - "completion": "0.00000043", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 32768, - "max_completion_tokens": null, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "logit_bias", - "max_tokens", - "min_p", - "presence_penalty", - "repetition_penalty", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_k", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "TheDrummer/Rocinante-12B-v1.1" }, { "id": "meta-llama/llama-3.2-90b-vision-instruct", - "canonical_slug": "meta-llama/llama-3.2-90b-vision-instruct", - "hugging_face_id": "meta-llama/Llama-3.2-90B-Vision-Instruct", "name": "Meta: Llama 3.2 90B Vision Instruct", - "created": 1727222400, - "description": "The Llama 90B Vision model is a top-tier, 90-billion-parameter multimodal model designed for the most challenging visual reasoning and language tasks. It offers unparalleled accuracy in image captioning, visual question answering, and advanced image-text comprehension. 
Pre-trained on vast multimodal datasets and fine-tuned with human feedback, the Llama 90B Vision is engineered to handle the most demanding image-based AI tasks.\n\nThis model is perfect for industries requiring cutting-edge multimodal AI capabilities, particularly those dealing with complex, real-time visual and textual analysis.\n\nClick here for the [original model card](https://github.com/meta-llama/llama-models/blob/main/models/llama3_2/MODEL_CARD_VISION.md).\n\nUsage of this model is subject to [Meta's Acceptable Use Policy](https://www.llama.com/llama3/use-policy/).", + "description": "The Llama 90B Vision model is a top-tier, 90-billion-parameter multimodal model designed for the most challenging visual reasoning and language tasks.", "context_length": 32768, - "architecture": { - "modality": "text+image-\u003Etext", - "input_modalities": ["text", "image"], - "output_modalities": ["text"], - "tokenizer": "Llama3", - "instruct_type": "llama3" - }, - "pricing": { - "prompt": "0.00000035", - "completion": "0.0000004", - "request": "0", - "image": "0.0005058", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 32768, - "max_completion_tokens": 16384, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "max_tokens", - "min_p", - "presence_penalty", - "repetition_penalty", - "response_format", - "seed", - "stop", - "temperature", - "top_k", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "meta-llama/Llama-3.2-90B-Vision-Instruct" }, { "id": "meta-llama/llama-3.2-1b-instruct", - "canonical_slug": "meta-llama/llama-3.2-1b-instruct", - "hugging_face_id": "meta-llama/Llama-3.2-1B-Instruct", "name": "Meta: Llama 3.2 1B Instruct", - "created": 1727222400, - "description": "Llama 3.2 1B is a 1-billion-parameter language model focused on efficiently performing natural language tasks, such as summarization, dialogue, and multilingual text analysis. 
Its smaller size allows it to operate efficiently in low-resource environments while maintaining strong task performance.\n\nSupporting eight core languages and fine-tunable for more, Llama 1.3B is ideal for businesses or developers seeking lightweight yet powerful AI solutions that can operate in diverse multilingual settings without the high computational demand of larger models.\n\nClick here for the [original model card](https://github.com/meta-llama/llama-models/blob/main/models/llama3_2/MODEL_CARD.md).\n\nUsage of this model is subject to [Meta's Acceptable Use Policy](https://www.llama.com/llama3/use-policy/).", + "description": "Llama 3.2 1B is a 1-billion-parameter language model focused on efficiently performing natural language tasks, such as summarization, dialogue, and multilingual text analysis.", "context_length": 60000, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Llama3", - "instruct_type": "llama3" - }, - "pricing": { - "prompt": "0.000000027", - "completion": "0.0000002", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 60000, - "max_completion_tokens": null, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "max_tokens", - "presence_penalty", - "repetition_penalty", - "seed", - "temperature", - "top_k", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "meta-llama/Llama-3.2-1B-Instruct" }, { "id": "meta-llama/llama-3.2-3b-instruct:free", - "canonical_slug": "meta-llama/llama-3.2-3b-instruct", - "hugging_face_id": "meta-llama/Llama-3.2-3B-Instruct", "name": "Meta: Llama 3.2 3B Instruct (free)", - "created": 1727222400, - "description": "Llama 3.2 3B is a 3-billion-parameter multilingual large language model, optimized for advanced natural language processing tasks like dialogue generation, reasoning, and summarization. Designed with the latest transformer architecture, it supports eight languages, including English, Spanish, and Hindi, and is adaptable for additional languages.\n\nTrained on 9 trillion tokens, the Llama 3.2 3B model excels in instruction-following, complex reasoning, and tool use. 
Its balanced performance makes it ideal for applications needing accuracy and efficiency in text generation across multilingual settings.\n\nClick here for the [original model card](https://github.com/meta-llama/llama-models/blob/main/models/llama3_2/MODEL_CARD.md).\n\nUsage of this model is subject to [Meta's Acceptable Use Policy](https://www.llama.com/llama3/use-policy/).", + "description": "Llama 3.2 3B is a 3-billion-parameter multilingual large language model, optimized for advanced natural language processing tasks like dialogue generation, reasoning, and summarization.", "context_length": 131072, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Llama3", - "instruct_type": "llama3" - }, - "pricing": { - "prompt": "0", - "completion": "0", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 131072, - "max_completion_tokens": null, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "max_tokens", - "presence_penalty", - "stop", - "temperature", - "top_k", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "meta-llama/Llama-3.2-3B-Instruct" }, { "id": "meta-llama/llama-3.2-3b-instruct", - "canonical_slug": "meta-llama/llama-3.2-3b-instruct", - "hugging_face_id": "meta-llama/Llama-3.2-3B-Instruct", "name": "Meta: Llama 3.2 3B Instruct", - "created": 1727222400, - "description": "Llama 3.2 3B is a 3-billion-parameter multilingual large language model, optimized for advanced natural language processing tasks like dialogue generation, reasoning, and summarization. Designed with the latest transformer architecture, it supports eight languages, including English, Spanish, and Hindi, and is adaptable for additional languages.\n\nTrained on 9 trillion tokens, the Llama 3.2 3B model excels in instruction-following, complex reasoning, and tool use. 
Its balanced performance makes it ideal for applications needing accuracy and efficiency in text generation across multilingual settings.\n\nClick here for the [original model card](https://github.com/meta-llama/llama-models/blob/main/models/llama3_2/MODEL_CARD.md).\n\nUsage of this model is subject to [Meta's Acceptable Use Policy](https://www.llama.com/llama3/use-policy/).", + "description": "Llama 3.2 3B is a 3-billion-parameter multilingual large language model, optimized for advanced natural language processing tasks like dialogue generation, reasoning, and summarization.", "context_length": 131072, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Llama3", - "instruct_type": "llama3" - }, - "pricing": { - "prompt": "0.00000002", - "completion": "0.00000002", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 131072, - "max_completion_tokens": 16384, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "logit_bias", - "max_tokens", - "min_p", - "presence_penalty", - "repetition_penalty", - "response_format", - "seed", - "stop", - "temperature", - "tool_choice", - "tools", - "top_k", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "meta-llama/Llama-3.2-3B-Instruct" }, { "id": "meta-llama/llama-3.2-11b-vision-instruct", - "canonical_slug": "meta-llama/llama-3.2-11b-vision-instruct", - "hugging_face_id": "meta-llama/Llama-3.2-11B-Vision-Instruct", "name": "Meta: Llama 3.2 11B Vision Instruct", - "created": 1727222400, - "description": "Llama 3.2 11B Vision is a multimodal model with 11 billion parameters, designed to handle tasks combining visual and textual data. It excels in tasks such as image captioning and visual question answering, bridging the gap between language generation and visual reasoning. 
Pre-trained on a massive dataset of image-text pairs, it performs well in complex, high-accuracy image analysis.\n\nIts ability to integrate visual understanding with language processing makes it an ideal solution for industries requiring comprehensive visual-linguistic AI applications, such as content creation, AI-driven customer service, and research.\n\nClick here for the [original model card](https://github.com/meta-llama/llama-models/blob/main/models/llama3_2/MODEL_CARD_VISION.md).\n\nUsage of this model is subject to [Meta's Acceptable Use Policy](https://www.llama.com/llama3/use-policy/).", + "description": "Llama 3.2 11B Vision is a multimodal model with 11 billion parameters, designed to handle tasks combining visual and textual data.", "context_length": 131072, - "architecture": { - "modality": "text+image-\u003Etext", - "input_modalities": ["text", "image"], - "output_modalities": ["text"], - "tokenizer": "Llama3", - "instruct_type": "llama3" - }, - "pricing": { - "prompt": "0.000000049", - "completion": "0.000000049", - "request": "0", - "image": "0.00007948", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 131072, - "max_completion_tokens": 16384, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "logit_bias", - "max_tokens", - "min_p", - "presence_penalty", - "repetition_penalty", - "response_format", - "seed", - "stop", - "temperature", - "top_k", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "meta-llama/Llama-3.2-11B-Vision-Instruct" }, { "id": "qwen/qwen-2.5-72b-instruct:free", - "canonical_slug": "qwen/qwen-2.5-72b-instruct", - "hugging_face_id": "Qwen/Qwen2.5-72B-Instruct", "name": "Qwen2.5 72B Instruct (free)", - "created": 1726704000, - "description": "Qwen2.5 72B is the latest series of Qwen large language models. Qwen2.5 brings the following improvements upon Qwen2:\n\n- Significantly more knowledge and has greatly improved capabilities in coding and mathematics, thanks to our specialized expert models in these domains.\n\n- Significant improvements in instruction following, generating long texts (over 8K tokens), understanding structured data (e.g, tables), and generating structured outputs especially JSON. 
More resilient to the diversity of system prompts, enhancing role-play implementation and condition-setting for chatbots.\n\n- Long-context Support up to 128K tokens and can generate up to 8K tokens.\n\n- Multilingual support for over 29 languages, including Chinese, English, French, Spanish, Portuguese, German, Italian, Russian, Japanese, Korean, Vietnamese, Thai, Arabic, and more.\n\nUsage of this model is subject to [Tongyi Qianwen LICENSE AGREEMENT](https://huggingface.co/Qwen/Qwen1.5-110B-Chat/blob/main/LICENSE).", + "description": "Qwen2.5 72B is the latest series of Qwen large language models.", "context_length": 32768, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Qwen", - "instruct_type": "chatml" - }, - "pricing": { - "prompt": "0", - "completion": "0", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 32768, - "max_completion_tokens": null, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "max_tokens", - "presence_penalty", - "repetition_penalty", - "seed", - "stop", - "temperature", - "top_k", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "Qwen/Qwen2.5-72B-Instruct" }, { "id": "qwen/qwen-2.5-72b-instruct", - "canonical_slug": "qwen/qwen-2.5-72b-instruct", - "hugging_face_id": "Qwen/Qwen2.5-72B-Instruct", "name": "Qwen2.5 72B Instruct", - "created": 1726704000, - "description": "Qwen2.5 72B is the latest series of Qwen large language models. Qwen2.5 brings the following improvements upon Qwen2:\n\n- Significantly more knowledge and has greatly improved capabilities in coding and mathematics, thanks to our specialized expert models in these domains.\n\n- Significant improvements in instruction following, generating long texts (over 8K tokens), understanding structured data (e.g, tables), and generating structured outputs especially JSON. 
More resilient to the diversity of system prompts, enhancing role-play implementation and condition-setting for chatbots.\n\n- Long-context Support up to 128K tokens and can generate up to 8K tokens.\n\n- Multilingual support for over 29 languages, including Chinese, English, French, Spanish, Portuguese, German, Italian, Russian, Japanese, Korean, Vietnamese, Thai, Arabic, and more.\n\nUsage of this model is subject to [Tongyi Qianwen LICENSE AGREEMENT](https://huggingface.co/Qwen/Qwen1.5-110B-Chat/blob/main/LICENSE).", + "description": "Qwen2.5 72B is the latest series of Qwen large language models.", "context_length": 32768, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Qwen", - "instruct_type": "chatml" - }, - "pricing": { - "prompt": "0.00000007", - "completion": "0.00000026", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 32768, - "max_completion_tokens": 32768, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "logit_bias", - "max_tokens", - "min_p", - "presence_penalty", - "repetition_penalty", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_k", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "Qwen/Qwen2.5-72B-Instruct" }, { "id": "neversleep/llama-3.1-lumimaid-8b", - "canonical_slug": "neversleep/llama-3.1-lumimaid-8b", - "hugging_face_id": "NeverSleep/Lumimaid-v0.2-8B", "name": "NeverSleep: Lumimaid v0.2 8B", - "created": 1726358400, - "description": "Lumimaid v0.2 8B is a finetune of [Llama 3.1 8B](/models/meta-llama/llama-3.1-8b-instruct) with a \"HUGE step up dataset wise\" compared to Lumimaid v0.1. Sloppy chats output were purged.\n\nUsage of this model is subject to [Meta's Acceptable Use Policy](https://llama.meta.com/llama3/use-policy/).", + "description": "Lumimaid v0.2 8B is a finetune of [Llama 3.1 8B](/models/meta-llama/llama-3.1-8b-instruct) with a \"HUGE step up dataset wise\" compared to Lumimaid v0.1.", "context_length": 32768, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Llama3", - "instruct_type": "llama3" - }, - "pricing": { - "prompt": "0.00000009", - "completion": "0.0000006", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 32768, - "max_completion_tokens": null, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "logit_bias", - "logprobs", - "max_tokens", - "min_p", - "presence_penalty", - "repetition_penalty", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "top_a", - "top_k", - "top_logprobs", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "NeverSleep/Lumimaid-v0.2-8B" }, { "id": "mistralai/pixtral-12b", - "canonical_slug": "mistralai/pixtral-12b", - "hugging_face_id": "mistralai/Pixtral-12B-2409", "name": "Mistral: Pixtral 12B", - "created": 1725926400, - "description": "The first multi-modal, text+image-to-text model from Mistral AI. 
Its weights were launched via torrent: https://x.com/mistralai/status/1833758285167722836.", + "description": "The first multi-modal, text+image-to-text model from Mistral AI.", "context_length": 32768, - "architecture": { - "modality": "text+image-\u003Etext", - "input_modalities": ["text", "image"], - "output_modalities": ["text"], - "tokenizer": "Mistral", - "instruct_type": null - }, - "pricing": { - "prompt": "0.0000001", - "completion": "0.0000001", - "request": "0", - "image": "0.0001445", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 32768, - "max_completion_tokens": null, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "logit_bias", - "max_tokens", - "min_p", - "presence_penalty", - "repetition_penalty", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_k", - "top_p" - ], - "default_parameters": { - "temperature": 0.3 - } + "hugging_face_id": "mistralai/Pixtral-12B-2409" }, { "id": "cohere/command-r-08-2024", - "canonical_slug": "cohere/command-r-08-2024", - "hugging_face_id": null, "name": "Cohere: Command R (08-2024)", - "created": 1724976000, - "description": "command-r-08-2024 is an update of the [Command R](/models/cohere/command-r) with improved performance for multilingual retrieval-augmented generation (RAG) and tool use. More broadly, it is better at math, code and reasoning and is competitive with the previous version of the larger Command R+ model.\n\nRead the launch post [here](https://docs.cohere.com/changelog/command-gets-refreshed).\n\nUse of this model is subject to Cohere's [Usage Policy](https://docs.cohere.com/docs/usage-policy) and [SaaS Agreement](https://cohere.com/saas-agreement).", + "description": "command-r-08-2024 is an update of the [Command R](/models/cohere/command-r) with improved performance for multilingual retrieval-augmented generation (RAG) and tool use.", "context_length": 128000, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Cohere", - "instruct_type": null - }, - "pricing": { - "prompt": "0.00000015", - "completion": "0.0000006", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 128000, - "max_completion_tokens": 4000, - "is_moderated": true - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "max_tokens", - "presence_penalty", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_k", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "" }, { "id": "cohere/command-r-plus-08-2024", - "canonical_slug": "cohere/command-r-plus-08-2024", - "hugging_face_id": null, "name": "Cohere: Command R+ (08-2024)", - "created": 1724976000, - "description": "command-r-plus-08-2024 is an update of the [Command R+](/models/cohere/command-r-plus) with roughly 50% higher throughput and 25% lower latencies as compared to the previous Command R+ version, while keeping the hardware footprint the same.\n\nRead the launch post [here](https://docs.cohere.com/changelog/command-gets-refreshed).\n\nUse of this model is subject to Cohere's [Usage Policy](https://docs.cohere.com/docs/usage-policy) and [SaaS Agreement](https://cohere.com/saas-agreement).", + "description": "command-r-plus-08-2024 is an update of the [Command 
R+](/models/cohere/command-r-plus) with roughly 50% higher throughput and 25% lower latencies as compared to the previous Command R+ version.", "context_length": 128000, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Cohere", - "instruct_type": null - }, - "pricing": { - "prompt": "0.0000025", - "completion": "0.00001", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 128000, - "max_completion_tokens": 4000, - "is_moderated": true - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "max_tokens", - "presence_penalty", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_k", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "" }, { "id": "sao10k/l3.1-euryale-70b", - "canonical_slug": "sao10k/l3.1-euryale-70b", - "hugging_face_id": "Sao10K/L3.1-70B-Euryale-v2.2", "name": "Sao10K: Llama 3.1 Euryale 70B v2.2", - "created": 1724803200, - "description": "Euryale L3.1 70B v2.2 is a model focused on creative roleplay from [Sao10k](https://ko-fi.com/sao10k). It is the successor of [Euryale L3 70B v2.1](/models/sao10k/l3-euryale-70b).", + "description": "Euryale L3.1 70B v2.2 is a model focused on creative roleplay from [Sao10k](https://ko-fi.com/sao10k).", "context_length": 32768, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Llama3", - "instruct_type": "llama3" - }, - "pricing": { - "prompt": "0.00000065", - "completion": "0.00000075", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 32768, - "max_completion_tokens": null, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "max_tokens", - "min_p", - "presence_penalty", - "repetition_penalty", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_k", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "Sao10K/L3.1-70B-Euryale-v2.2" }, { "id": "qwen/qwen-2.5-vl-7b-instruct", - "canonical_slug": "qwen/qwen-2-vl-7b-instruct", - "hugging_face_id": "Qwen/Qwen2.5-VL-7B-Instruct", "name": "Qwen: Qwen2.5-VL 7B Instruct", - "created": 1724803200, - "description": "Qwen2.5 VL 7B is a multimodal LLM from the Qwen Team with the following key enhancements:\n\n- SoTA understanding of images of various resolution & ratio: Qwen2.5-VL achieves state-of-the-art performance on visual understanding benchmarks, including MathVista, DocVQA, RealWorldQA, MTVQA, etc.\n\n- Understanding videos of 20min+: Qwen2.5-VL can understand videos over 20 minutes for high-quality video-based question answering, dialog, content creation, etc.\n\n- Agent that can operate your mobiles, robots, etc.: with the abilities of complex reasoning and decision making, Qwen2.5-VL can be integrated with devices like mobile phones, robots, etc., for automatic operation based on visual environment and text instructions.\n\n- Multilingual Support: to serve global users, besides English and Chinese, Qwen2.5-VL now supports the understanding of texts in different languages inside images, including most European languages, Japanese, Korean, Arabic, Vietnamese, etc.\n\nFor more details, see this [blog 
post](https://qwenlm.github.io/blog/qwen2-vl/) and [GitHub repo](https://github.com/QwenLM/Qwen2-VL).\n\nUsage of this model is subject to [Tongyi Qianwen LICENSE AGREEMENT](https://huggingface.co/Qwen/Qwen1.5-110B-Chat/blob/main/LICENSE).", + "description": "Qwen2.5 VL 7B is a multimodal LLM from the Qwen Team with the following key enhancements:\n\n- SoTA understanding of images of various resolution & ratio: Qwen2.5-VL achieves state-of-the-art performance on visual understanding benchmarks.", "context_length": 32768, - "architecture": { - "modality": "text+image-\u003Etext", - "input_modalities": ["text", "image"], - "output_modalities": ["text"], - "tokenizer": "Qwen", - "instruct_type": null - }, - "pricing": { - "prompt": "0.0000002", - "completion": "0.0000002", - "request": "0", - "image": "0.0001445", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 32768, - "max_completion_tokens": null, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "logit_bias", - "max_tokens", - "min_p", - "presence_penalty", - "repetition_penalty", - "seed", - "stop", - "temperature", - "top_k", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "Qwen/Qwen2.5-VL-7B-Instruct" }, { "id": "microsoft/phi-3.5-mini-128k-instruct", - "canonical_slug": "microsoft/phi-3.5-mini-128k-instruct", - "hugging_face_id": "microsoft/Phi-3.5-mini-instruct", "name": "Microsoft: Phi-3.5 Mini 128K Instruct", - "created": 1724198400, - "description": "Phi-3.5 models are lightweight, state-of-the-art open models. These models were trained with Phi-3 datasets that include both synthetic data and the filtered, publicly available websites data, with a focus on high quality and reasoning-dense properties. Phi-3.5 Mini uses 3.8B parameters, and is a dense decoder-only transformer model using the same tokenizer as [Phi-3 Mini](/models/microsoft/phi-3-mini-128k-instruct).\n\nThe models underwent a rigorous enhancement process, incorporating both supervised fine-tuning, proximal policy optimization, and direct preference optimization to ensure precise instruction adherence and robust safety measures. 
When assessed against benchmarks that test common sense, language understanding, math, code, long context and logical reasoning, Phi-3.5 models showcased robust and state-of-the-art performance among models with less than 13 billion parameters.", + "description": "Phi-3.5 models are lightweight, state-of-the-art open models.", "context_length": 128000, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Other", - "instruct_type": "phi3" - }, - "pricing": { - "prompt": "0.0000001", - "completion": "0.0000001", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 128000, - "max_completion_tokens": null, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "max_tokens", - "temperature", - "tool_choice", - "tools", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "microsoft/Phi-3.5-mini-instruct" }, { "id": "nousresearch/hermes-3-llama-3.1-70b", - "canonical_slug": "nousresearch/hermes-3-llama-3.1-70b", - "hugging_face_id": "NousResearch/Hermes-3-Llama-3.1-70B", "name": "Nous: Hermes 3 70B Instruct", - "created": 1723939200, - "description": "Hermes 3 is a generalist language model with many improvements over [Hermes 2](/models/nousresearch/nous-hermes-2-mistral-7b-dpo), including advanced agentic capabilities, much better roleplaying, reasoning, multi-turn conversation, long context coherence, and improvements across the board.\n\nHermes 3 70B is a competitive, if not superior finetune of the [Llama-3.1 70B foundation model](/models/meta-llama/llama-3.1-70b-instruct), focused on aligning LLMs to the user, with powerful steering capabilities and control given to the end user.\n\nThe Hermes 3 series builds and expands on the Hermes 2 set of capabilities, including more powerful and reliable function calling and structured output capabilities, generalist assistant capabilities, and improved code generation skills.", + "description": "Hermes 3 is a generalist language model with many improvements over [Hermes 2](/models/nousresearch/nous-hermes-2-mistral-7b-dpo), including advanced agentic capabilities, much better roleplaying, reasoning, multi-turn conversation, and long context coherence.", "context_length": 65536, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Llama3", - "instruct_type": "chatml" - }, - "pricing": { - "prompt": "0.0000003", - "completion": "0.0000003", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 65536, - "max_completion_tokens": null, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "logit_bias", - "max_tokens", - "min_p", - "presence_penalty", - "repetition_penalty", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_k", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "NousResearch/Hermes-3-Llama-3.1-70B" }, { "id": "nousresearch/hermes-3-llama-3.1-405b:free", - "canonical_slug": "nousresearch/hermes-3-llama-3.1-405b", - "hugging_face_id": "NousResearch/Hermes-3-Llama-3.1-405B", "name": "Nous: Hermes 3 405B Instruct (free)", - "created": 1723766400, - "description": "Hermes 3 is a generalist language model with many improvements over Hermes 2, including advanced agentic capabilities, much better roleplaying, reasoning, 
multi-turn conversation, long context coherence, and improvements across the board.\n\nHermes 3 405B is a frontier-level, full-parameter finetune of the Llama-3.1 405B foundation model, focused on aligning LLMs to the user, with powerful steering capabilities and control given to the end user.\n\nThe Hermes 3 series builds and expands on the Hermes 2 set of capabilities, including more powerful and reliable function calling and structured output capabilities, generalist assistant capabilities, and improved code generation skills.\n\nHermes 3 is competitive, if not superior, to Llama-3.1 Instruct models at general capabilities, with varying strengths and weaknesses attributable between the two.", + "description": "Hermes 3 is a generalist language model with many improvements over Hermes 2, including advanced agentic capabilities, much better roleplaying, reasoning, multi-turn conversation, and long context coherence.", "context_length": 131072, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Llama3", - "instruct_type": "chatml" - }, - "pricing": { - "prompt": "0", - "completion": "0", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 131072, - "max_completion_tokens": null, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "max_tokens", - "presence_penalty", - "stop", - "temperature", - "top_k", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "NousResearch/Hermes-3-Llama-3.1-405B" }, { "id": "nousresearch/hermes-3-llama-3.1-405b", - "canonical_slug": "nousresearch/hermes-3-llama-3.1-405b", - "hugging_face_id": "NousResearch/Hermes-3-Llama-3.1-405B", "name": "Nous: Hermes 3 405B Instruct", - "created": 1723766400, - "description": "Hermes 3 is a generalist language model with many improvements over Hermes 2, including advanced agentic capabilities, much better roleplaying, reasoning, multi-turn conversation, long context coherence, and improvements across the board.\n\nHermes 3 405B is a frontier-level, full-parameter finetune of the Llama-3.1 405B foundation model, focused on aligning LLMs to the user, with powerful steering capabilities and control given to the end user.\n\nThe Hermes 3 series builds and expands on the Hermes 2 set of capabilities, including more powerful and reliable function calling and structured output capabilities, generalist assistant capabilities, and improved code generation skills.\n\nHermes 3 is competitive, if not superior, to Llama-3.1 Instruct models at general capabilities, with varying strengths and weaknesses attributable between the two.", + "description": "Hermes 3 is a generalist language model with many improvements over Hermes 2, including advanced agentic capabilities, much better roleplaying, reasoning, multi-turn conversation, and long context coherence.", "context_length": 131072, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Llama3", - "instruct_type": "chatml" - }, - "pricing": { - "prompt": "0.000001", - "completion": "0.000001", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 131072, - "max_completion_tokens": 16384, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "max_tokens", - "min_p", - 
"presence_penalty", - "repetition_penalty", - "response_format", - "seed", - "stop", - "temperature", - "top_k", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "NousResearch/Hermes-3-Llama-3.1-405B" }, { "id": "openai/chatgpt-4o-latest", - "canonical_slug": "openai/chatgpt-4o-latest", - "hugging_face_id": null, "name": "OpenAI: ChatGPT-4o", - "created": 1723593600, - "description": "OpenAI ChatGPT 4o is continually updated by OpenAI to point to the current version of GPT-4o used by ChatGPT. It therefore differs slightly from the API version of [GPT-4o](/models/openai/gpt-4o) in that it has additional RLHF. It is intended for research and evaluation.\n\nOpenAI notes that this model is not suited for production use-cases as it may be removed or redirected to another model in the future.", + "description": "OpenAI ChatGPT 4o is continually updated by OpenAI to point to the current version of GPT-4o used by ChatGPT.", "context_length": 128000, - "architecture": { - "modality": "text+image-\u003Etext", - "input_modalities": ["text", "image"], - "output_modalities": ["text"], - "tokenizer": "GPT", - "instruct_type": null - }, - "pricing": { - "prompt": "0.000005", - "completion": "0.000015", - "request": "0", - "image": "0.007225", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 128000, - "max_completion_tokens": 16384, - "is_moderated": true - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "logit_bias", - "logprobs", - "max_tokens", - "presence_penalty", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "top_logprobs", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "" }, { "id": "sao10k/l3-lunaris-8b", - "canonical_slug": "sao10k/l3-lunaris-8b", - "hugging_face_id": "Sao10K/L3-8B-Lunaris-v1", "name": "Sao10K: Llama 3 8B Lunaris", - "created": 1723507200, - "description": "Lunaris 8B is a versatile generalist and roleplaying model based on Llama 3. 
It's a strategic merge of multiple models, designed to balance creativity with improved logic and general knowledge.\n\nCreated by [Sao10k](https://huggingface.co/Sao10k), this model aims to offer an improved experience over Stheno v3.2, with enhanced creativity and logical reasoning.\n\nFor best results, use with Llama 3 Instruct context template, temperature 1.4, and min_p 0.1.", + "description": "Lunaris 8B is a versatile generalist and roleplaying model based on Llama 3.", "context_length": 8192, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Llama3", - "instruct_type": "llama3" - }, - "pricing": { - "prompt": "0.00000004", - "completion": "0.00000005", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 8192, - "max_completion_tokens": null, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "max_tokens", - "min_p", - "presence_penalty", - "repetition_penalty", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "top_k", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "Sao10K/L3-8B-Lunaris-v1" }, { "id": "openai/gpt-4o-2024-08-06", - "canonical_slug": "openai/gpt-4o-2024-08-06", - "hugging_face_id": null, "name": "OpenAI: GPT-4o (2024-08-06)", - "created": 1722902400, - "description": "The 2024-08-06 version of GPT-4o offers improved performance in structured outputs, with the ability to supply a JSON schema in the respone_format. Read more [here](https://openai.com/index/introducing-structured-outputs-in-the-api/).\n\nGPT-4o (\"o\" for \"omni\") is OpenAI's latest AI model, supporting both text and image inputs with text outputs. It maintains the intelligence level of [GPT-4 Turbo](/models/openai/gpt-4-turbo) while being twice as fast and 50% more cost-effective. 
GPT-4o also offers improved performance in processing non-English languages and enhanced visual capabilities.\n\nFor benchmarking against other models, it was briefly called [\"im-also-a-good-gpt2-chatbot\"](https://twitter.com/LiamFedus/status/1790064963966370209)", + "description": "The 2024-08-06 version of GPT-4o offers improved performance in structured outputs, with the ability to supply a JSON schema in the response_format.", "context_length": 128000, - "architecture": { - "modality": "text+image-\u003Etext", - "input_modalities": ["text", "image", "file"], - "output_modalities": ["text"], - "tokenizer": "GPT", - "instruct_type": null - }, - "pricing": { - "prompt": "0.0000025", - "completion": "0.00001", - "request": "0", - "image": "0.003613", - "web_search": "0", - "internal_reasoning": "0", - "input_cache_read": "0.00000125" - }, - "top_provider": { - "context_length": 128000, - "max_completion_tokens": 16384, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "logit_bias", - "logprobs", - "max_tokens", - "presence_penalty", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_logprobs", - "top_p", - "web_search_options" - ], - "default_parameters": {} + "hugging_face_id": "" }, { "id": "meta-llama/llama-3.1-405b", - "canonical_slug": "meta-llama/llama-3.1-405b", - "hugging_face_id": "meta-llama/llama-3.1-405B", "name": "Meta: Llama 3.1 405B (base)", - "created": 1722556800, - "description": "Meta's latest class of model (Llama 3.1) launched with a variety of sizes & flavors. This is the base 405B pre-trained version.\n\nIt has demonstrated strong performance compared to leading closed-source models in human evaluations.\n\nTo read more about the model release, [click here](https://ai.meta.com/blog/meta-llama-3/). Usage of this model is subject to [Meta's Acceptable Use Policy](https://llama.meta.com/llama3/use-policy/).", + "description": "Meta's latest class of model (Llama 3.1) launched with a variety of sizes & flavors.", "context_length": 32768, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Llama3", - "instruct_type": "none" - }, - "pricing": { - "prompt": "0.000004", - "completion": "0.000004", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 32768, - "max_completion_tokens": 32768, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "logit_bias", - "max_tokens", - "min_p", - "presence_penalty", - "repetition_penalty", - "seed", - "stop", - "temperature", - "top_k", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "meta-llama/llama-3.1-405B" }, { "id": "meta-llama/llama-3.1-70b-instruct", - "canonical_slug": "meta-llama/llama-3.1-70b-instruct", - "hugging_face_id": "meta-llama/Meta-Llama-3.1-70B-Instruct", "name": "Meta: Llama 3.1 70B Instruct", - "created": 1721692800, - "description": "Meta's latest class of model (Llama 3.1) launched with a variety of sizes & flavors. This 70B instruct-tuned version is optimized for high quality dialogue usecases.\n\nIt has demonstrated strong performance compared to leading closed-source models in human evaluations.\n\nTo read more about the model release, [click here](https://ai.meta.com/blog/meta-llama-3-1/). 
Usage of this model is subject to [Meta's Acceptable Use Policy](https://llama.meta.com/llama3/use-policy/).", + "description": "Meta's latest class of model (Llama 3.1) launched with a variety of sizes & flavors.", "context_length": 131072, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Llama3", - "instruct_type": "llama3" - }, - "pricing": { - "prompt": "0.0000004", - "completion": "0.0000004", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 131072, - "max_completion_tokens": null, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "logit_bias", - "max_tokens", - "min_p", - "presence_penalty", - "repetition_penalty", - "response_format", - "seed", - "stop", - "temperature", - "tool_choice", - "tools", - "top_k", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "meta-llama/Meta-Llama-3.1-70B-Instruct" }, { "id": "meta-llama/llama-3.1-405b-instruct", - "canonical_slug": "meta-llama/llama-3.1-405b-instruct", - "hugging_face_id": "meta-llama/Meta-Llama-3.1-405B-Instruct", "name": "Meta: Llama 3.1 405B Instruct", - "created": 1721692800, - "description": "The highly anticipated 400B class of Llama3 is here! Clocking in at 128k context with impressive eval scores, the Meta AI team continues to push the frontier of open-source LLMs.\n\nMeta's latest class of model (Llama 3.1) launched with a variety of sizes & flavors. This 405B instruct-tuned version is optimized for high quality dialogue usecases.\n\nIt has demonstrated strong performance compared to leading closed-source models including GPT-4o and Claude 3.5 Sonnet in evaluations.\n\nTo read more about the model release, [click here](https://ai.meta.com/blog/meta-llama-3-1/). Usage of this model is subject to [Meta's Acceptable Use Policy](https://llama.meta.com/llama3/use-policy/).", + "description": "The highly anticipated 400B class of Llama3 is here.", "context_length": 130815, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Llama3", - "instruct_type": "llama3" - }, - "pricing": { - "prompt": "0.0000035", - "completion": "0.0000035", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 130815, - "max_completion_tokens": null, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "logit_bias", - "max_tokens", - "min_p", - "presence_penalty", - "repetition_penalty", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_k", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "meta-llama/Meta-Llama-3.1-405B-Instruct" }, { "id": "meta-llama/llama-3.1-8b-instruct", - "canonical_slug": "meta-llama/llama-3.1-8b-instruct", - "hugging_face_id": "meta-llama/Meta-Llama-3.1-8B-Instruct", "name": "Meta: Llama 3.1 8B Instruct", - "created": 1721692800, - "description": "Meta's latest class of model (Llama 3.1) launched with a variety of sizes & flavors. This 8B instruct-tuned version is fast and efficient.\n\nIt has demonstrated strong performance compared to leading closed-source models in human evaluations.\n\nTo read more about the model release, [click here](https://ai.meta.com/blog/meta-llama-3-1/). 
Usage of this model is subject to [Meta's Acceptable Use Policy](https://llama.meta.com/llama3/use-policy/).", + "description": "Meta's latest class of model (Llama 3.1) launched with a variety of sizes & flavors.", "context_length": 131072, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Llama3", - "instruct_type": "llama3" - }, - "pricing": { - "prompt": "0.00000002", - "completion": "0.00000003", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 131072, - "max_completion_tokens": 16384, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "logit_bias", - "logprobs", - "max_tokens", - "min_p", - "presence_penalty", - "repetition_penalty", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_k", - "top_logprobs", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "meta-llama/Meta-Llama-3.1-8B-Instruct" }, { "id": "mistralai/mistral-nemo:free", - "canonical_slug": "mistralai/mistral-nemo", - "hugging_face_id": "mistralai/Mistral-Nemo-Instruct-2407", "name": "Mistral: Mistral Nemo (free)", - "created": 1721347200, - "description": "A 12B parameter model with a 128k token context length built by Mistral in collaboration with NVIDIA.\n\nThe model is multilingual, supporting English, French, German, Spanish, Italian, Portuguese, Chinese, Japanese, Korean, Arabic, and Hindi.\n\nIt supports function calling and is released under the Apache 2.0 license.", + "description": "A 12B parameter model with a 128k token context length built by Mistral in collaboration with NVIDIA.", "context_length": 131072, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Mistral", - "instruct_type": "mistral" - }, - "pricing": { - "prompt": "0", - "completion": "0", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 131072, - "max_completion_tokens": 128000, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "max_tokens", - "presence_penalty", - "repetition_penalty", - "seed", - "stop", - "temperature", - "top_k", - "top_p" - ], - "default_parameters": { - "temperature": 0.3 - } + "hugging_face_id": "mistralai/Mistral-Nemo-Instruct-2407" }, { "id": "mistralai/mistral-nemo", - "canonical_slug": "mistralai/mistral-nemo", - "hugging_face_id": "mistralai/Mistral-Nemo-Instruct-2407", "name": "Mistral: Mistral Nemo", - "created": 1721347200, - "description": "A 12B parameter model with a 128k token context length built by Mistral in collaboration with NVIDIA.\n\nThe model is multilingual, supporting English, French, German, Spanish, Italian, Portuguese, Chinese, Japanese, Korean, Arabic, and Hindi.\n\nIt supports function calling and is released under the Apache 2.0 license.", + "description": "A 12B parameter model with a 128k token context length built by Mistral in collaboration with NVIDIA.", "context_length": 131072, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Mistral", - "instruct_type": "mistral" - }, - "pricing": { - "prompt": "0.00000002", - "completion": "0.00000004", - "request": "0", - "image": "0", - 
"web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 131072, - "max_completion_tokens": 16384, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "max_tokens", - "min_p", - "presence_penalty", - "repetition_penalty", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_k", - "top_p" - ], - "default_parameters": { - "temperature": 0.3 - } + "hugging_face_id": "mistralai/Mistral-Nemo-Instruct-2407" }, { "id": "openai/gpt-4o-mini", - "canonical_slug": "openai/gpt-4o-mini", - "hugging_face_id": null, "name": "OpenAI: GPT-4o-mini", - "created": 1721260800, - "description": "GPT-4o mini is OpenAI's newest model after [GPT-4 Omni](/models/openai/gpt-4o), supporting both text and image inputs with text outputs.\n\nAs their most advanced small model, it is many multiples more affordable than other recent frontier models, and more than 60% cheaper than [GPT-3.5 Turbo](/models/openai/gpt-3.5-turbo). It maintains SOTA intelligence, while being significantly more cost-effective.\n\nGPT-4o mini achieves an 82% score on MMLU and presently ranks higher than GPT-4 on chat preferences [common leaderboards](https://arena.lmsys.org/).\n\nCheck out the [launch announcement](https://openai.com/index/gpt-4o-mini-advancing-cost-efficient-intelligence/) to learn more.\n\n#multimodal", + "description": "GPT-4o mini is OpenAI's newest model after [GPT-4 Omni](/models/openai/gpt-4o), supporting both text and image inputs with text outputs.", "context_length": 128000, - "architecture": { - "modality": "text+image-\u003Etext", - "input_modalities": ["text", "image", "file"], - "output_modalities": ["text"], - "tokenizer": "GPT", - "instruct_type": null - }, - "pricing": { - "prompt": "0.00000015", - "completion": "0.0000006", - "request": "0", - "image": "0.000217", - "web_search": "0", - "internal_reasoning": "0", - "input_cache_read": "0.000000075" - }, - "top_provider": { - "context_length": 128000, - "max_completion_tokens": 16384, - "is_moderated": true - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "logit_bias", - "logprobs", - "max_tokens", - "presence_penalty", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_logprobs", - "top_p", - "web_search_options" - ], - "default_parameters": {} + "hugging_face_id": "" }, { "id": "openai/gpt-4o-mini-2024-07-18", - "canonical_slug": "openai/gpt-4o-mini-2024-07-18", - "hugging_face_id": null, "name": "OpenAI: GPT-4o-mini (2024-07-18)", - "created": 1721260800, - "description": "GPT-4o mini is OpenAI's newest model after [GPT-4 Omni](/models/openai/gpt-4o), supporting both text and image inputs with text outputs.\n\nAs their most advanced small model, it is many multiples more affordable than other recent frontier models, and more than 60% cheaper than [GPT-3.5 Turbo](/models/openai/gpt-3.5-turbo). 
It maintains SOTA intelligence, while being significantly more cost-effective.\n\nGPT-4o mini achieves an 82% score on MMLU and presently ranks higher than GPT-4 on chat preferences [common leaderboards](https://arena.lmsys.org/).\n\nCheck out the [launch announcement](https://openai.com/index/gpt-4o-mini-advancing-cost-efficient-intelligence/) to learn more.\n\n#multimodal", + "description": "GPT-4o mini is OpenAI's newest model after [GPT-4 Omni](/models/openai/gpt-4o), supporting both text and image inputs with text outputs.", "context_length": 128000, - "architecture": { - "modality": "text+image-\u003Etext", - "input_modalities": ["text", "image", "file"], - "output_modalities": ["text"], - "tokenizer": "GPT", - "instruct_type": null - }, - "pricing": { - "prompt": "0.00000015", - "completion": "0.0000006", - "request": "0", - "image": "0.007225", - "web_search": "0", - "internal_reasoning": "0", - "input_cache_read": "0.000000075" - }, - "top_provider": { - "context_length": 128000, - "max_completion_tokens": 16384, - "is_moderated": true - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "logit_bias", - "logprobs", - "max_tokens", - "presence_penalty", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_logprobs", - "top_p", - "web_search_options" - ], - "default_parameters": {} + "hugging_face_id": "" }, { "id": "google/gemma-2-27b-it", - "canonical_slug": "google/gemma-2-27b-it", - "hugging_face_id": "google/gemma-2-27b-it", "name": "Google: Gemma 2 27B", - "created": 1720828800, - "description": "Gemma 2 27B by Google is an open model built from the same research and technology used to create the [Gemini models](/models?q=gemini).\n\nGemma models are well-suited for a variety of text generation tasks, including question answering, summarization, and reasoning.\n\nSee the [launch announcement](https://blog.google/technology/developers/google-gemma-2/) for more details. 
Usage of Gemma is subject to Google's [Gemma Terms of Use](https://ai.google.dev/gemma/terms).", + "description": "Gemma 2 27B by Google is an open model built from the same research and technology used to create the [Gemini models](/models?q=gemini).", "context_length": 8192, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Gemini", - "instruct_type": "gemma" - }, - "pricing": { - "prompt": "0.00000065", - "completion": "0.00000065", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 8192, - "max_completion_tokens": null, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "max_tokens", - "presence_penalty", - "response_format", - "stop", - "structured_outputs", - "temperature", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "google/gemma-2-27b-it" }, { "id": "google/gemma-2-9b-it", - "canonical_slug": "google/gemma-2-9b-it", - "hugging_face_id": "google/gemma-2-9b-it", "name": "Google: Gemma 2 9B", - "created": 1719532800, - "description": "Gemma 2 9B by Google is an advanced, open-source language model that sets a new standard for efficiency and performance in its size class.\n\nDesigned for a wide variety of tasks, it empowers developers and researchers to build innovative applications, while maintaining accessibility, safety, and cost-effectiveness.\n\nSee the [launch announcement](https://blog.google/technology/developers/google-gemma-2/) for more details. Usage of Gemma is subject to Google's [Gemma Terms of Use](https://ai.google.dev/gemma/terms).", + "description": "Gemma 2 9B by Google is an advanced, open-source language model that sets a new standard for efficiency and performance in its size class.", "context_length": 8192, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Gemini", - "instruct_type": "gemma" - }, - "pricing": { - "prompt": "0.00000003", - "completion": "0.00000009", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 8192, - "max_completion_tokens": null, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "max_tokens", - "presence_penalty", - "repetition_penalty", - "temperature", - "top_k", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "google/gemma-2-9b-it" }, { "id": "anthropic/claude-3.5-sonnet-20240620", - "canonical_slug": "anthropic/claude-3.5-sonnet-20240620", - "hugging_face_id": null, "name": "Anthropic: Claude 3.5 Sonnet (2024-06-20)", - "created": 1718841600, - "description": "Claude 3.5 Sonnet delivers better-than-Opus capabilities, faster-than-Sonnet speeds, at the same Sonnet prices. Sonnet is particularly good at:\n\n- Coding: Autonomously writes, edits, and runs code with reasoning and troubleshooting\n- Data science: Augments human data science expertise; navigates unstructured data while using multiple tools for insights\n- Visual processing: excelling at interpreting charts, graphs, and images, accurately transcribing text to derive insights beyond just the text alone\n- Agentic tasks: exceptional tool use, making it great at agentic tasks (i.e. 
complex, multi-step problem solving tasks that require engaging with other systems)\n\nFor the latest version (2024-10-23), check out [Claude 3.5 Sonnet](/anthropic/claude-3.5-sonnet).\n\n#multimodal", + "description": "Claude 3.5 Sonnet delivers better-than-Opus capabilities, faster-than-Sonnet speeds, at the same Sonnet prices.", "context_length": 200000, - "architecture": { - "modality": "text+image-\u003Etext", - "input_modalities": ["text", "image", "file"], - "output_modalities": ["text"], - "tokenizer": "Claude", - "instruct_type": null - }, - "pricing": { - "prompt": "0.000003", - "completion": "0.000015", - "request": "0", - "image": "0.0048", - "web_search": "0", - "internal_reasoning": "0", - "input_cache_read": "0.0000003", - "input_cache_write": "0.00000375" - }, - "top_provider": { - "context_length": 200000, - "max_completion_tokens": 8192, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "max_tokens", - "stop", - "temperature", - "tool_choice", - "tools", - "top_k", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "" }, { "id": "sao10k/l3-euryale-70b", - "canonical_slug": "sao10k/l3-euryale-70b", - "hugging_face_id": "Sao10K/L3-70B-Euryale-v2.1", "name": "Sao10k: Llama 3 Euryale 70B v2.1", - "created": 1718668800, - "description": "Euryale 70B v2.1 is a model focused on creative roleplay from [Sao10k](https://ko-fi.com/sao10k).\n\n- Better prompt adherence.\n- Better anatomy / spatial awareness.\n- Adapts much better to unique and custom formatting / reply formats.\n- Very creative, lots of unique swipes.\n- Is not restrictive during roleplays.", + "description": "Euryale 70B v2.1 is a model focused on creative roleplay from [Sao10k](https://ko-fi.com/sao10k).", "context_length": 8192, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Llama3", - "instruct_type": "llama3" - }, - "pricing": { - "prompt": "0.00000148", - "completion": "0.00000148", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 8192, - "max_completion_tokens": 8192, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "max_tokens", - "presence_penalty", - "repetition_penalty", - "seed", - "stop", - "temperature", - "tool_choice", - "tools", - "top_k", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "Sao10K/L3-70B-Euryale-v2.1" }, { "id": "mistralai/mistral-7b-instruct-v0.3", - "canonical_slug": "mistralai/mistral-7b-instruct-v0.3", - "hugging_face_id": "mistralai/Mistral-7B-Instruct-v0.3", "name": "Mistral: Mistral 7B Instruct v0.3", - "created": 1716768000, - "description": "A high-performing, industry-standard 7.3B parameter model, with optimizations for speed and context length.\n\nAn improved version of [Mistral 7B Instruct v0.2](/models/mistralai/mistral-7b-instruct-v0.2), with the following changes:\n\n- Extended vocabulary to 32768\n- Supports v3 Tokenizer\n- Supports function calling\n\nNOTE: Support for function calling depends on the provider.", + "description": "A high-performing, industry-standard 7.3B parameter model, with optimizations for speed and context length.", "context_length": 32768, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Mistral", - "instruct_type": "mistral" - }, - "pricing": { - "prompt": "0.0000002", - 
"completion": "0.0000002", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 32768, - "max_completion_tokens": 4096, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "logit_bias", - "max_tokens", - "min_p", - "presence_penalty", - "repetition_penalty", - "stop", - "temperature", - "top_k", - "top_p" - ], - "default_parameters": { - "temperature": 0.3 - } + "hugging_face_id": "mistralai/Mistral-7B-Instruct-v0.3" }, { "id": "mistralai/mistral-7b-instruct:free", - "canonical_slug": "mistralai/mistral-7b-instruct", - "hugging_face_id": "mistralai/Mistral-7B-Instruct-v0.3", "name": "Mistral: Mistral 7B Instruct (free)", - "created": 1716768000, - "description": "A high-performing, industry-standard 7.3B parameter model, with optimizations for speed and context length.\n\n*Mistral 7B Instruct has multiple version variants, and this is intended to be the latest version.*", + "description": "A high-performing, industry-standard 7.3B parameter model, with optimizations for speed and context length.", "context_length": 32768, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Mistral", - "instruct_type": "mistral" - }, - "pricing": { - "prompt": "0", - "completion": "0", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 32768, - "max_completion_tokens": 16384, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "max_tokens", - "min_p", - "presence_penalty", - "repetition_penalty", - "response_format", - "seed", - "stop", - "temperature", - "tool_choice", - "tools", - "top_k", - "top_p" - ], - "default_parameters": { - "temperature": 0.3 - } + "hugging_face_id": "mistralai/Mistral-7B-Instruct-v0.3" }, { "id": "mistralai/mistral-7b-instruct", - "canonical_slug": "mistralai/mistral-7b-instruct", - "hugging_face_id": "mistralai/Mistral-7B-Instruct-v0.3", "name": "Mistral: Mistral 7B Instruct", - "created": 1716768000, - "description": "A high-performing, industry-standard 7.3B parameter model, with optimizations for speed and context length.\n\n*Mistral 7B Instruct has multiple version variants, and this is intended to be the latest version.*", + "description": "A high-performing, industry-standard 7.3B parameter model, with optimizations for speed and context length.", "context_length": 32768, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Mistral", - "instruct_type": "mistral" - }, - "pricing": { - "prompt": "0.000000028", - "completion": "0.000000054", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 32768, - "max_completion_tokens": 16384, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "logit_bias", - "max_tokens", - "min_p", - "presence_penalty", - "repetition_penalty", - "response_format", - "seed", - "stop", - "temperature", - "tool_choice", - "tools", - "top_k", - "top_p" - ], - "default_parameters": { - "temperature": 0.3 - } + "hugging_face_id": "mistralai/Mistral-7B-Instruct-v0.3" }, { "id": "nousresearch/hermes-2-pro-llama-3-8b", - "canonical_slug": "nousresearch/hermes-2-pro-llama-3-8b", - "hugging_face_id": 
"NousResearch/Hermes-2-Pro-Llama-3-8B", "name": "NousResearch: Hermes 2 Pro - Llama-3 8B", - "created": 1716768000, - "description": "Hermes 2 Pro is an upgraded, retrained version of Nous Hermes 2, consisting of an updated and cleaned version of the OpenHermes 2.5 Dataset, as well as a newly introduced Function Calling and JSON Mode dataset developed in-house.", + "description": "Hermes 2 Pro is an upgraded, retrained version of Nous Hermes 2, consisting of an updated and cleaned version of the OpenHermes 2.5 Dataset, as well as a newly introduced Function Calling and JSON.", "context_length": 8192, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Llama3", - "instruct_type": "chatml" - }, - "pricing": { - "prompt": "0.000000025", - "completion": "0.00000008", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 8192, - "max_completion_tokens": 2048, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "max_tokens", - "presence_penalty", - "repetition_penalty", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "top_k", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "NousResearch/Hermes-2-Pro-Llama-3-8B" }, { "id": "microsoft/phi-3-mini-128k-instruct", - "canonical_slug": "microsoft/phi-3-mini-128k-instruct", - "hugging_face_id": "microsoft/Phi-3-mini-128k-instruct", "name": "Microsoft: Phi-3 Mini 128K Instruct", - "created": 1716681600, - "description": "Phi-3 Mini is a powerful 3.8B parameter model designed for advanced language understanding, reasoning, and instruction following. Optimized through supervised fine-tuning and preference adjustments, it excels in tasks involving common sense, mathematics, logical reasoning, and code processing.\n\nAt time of release, Phi-3 Medium demonstrated state-of-the-art performance among lightweight models. This model is static, trained on an offline dataset with an October 2023 cutoff date.", + "description": "Phi-3 Mini is a powerful 3.8B parameter model designed for advanced language understanding, reasoning, and instruction following.", "context_length": 128000, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Other", - "instruct_type": "phi3" - }, - "pricing": { - "prompt": "0.0000001", - "completion": "0.0000001", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 128000, - "max_completion_tokens": null, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "max_tokens", - "temperature", - "tool_choice", - "tools", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "microsoft/Phi-3-mini-128k-instruct" }, { "id": "microsoft/phi-3-medium-128k-instruct", - "canonical_slug": "microsoft/phi-3-medium-128k-instruct", - "hugging_face_id": "microsoft/Phi-3-medium-128k-instruct", "name": "Microsoft: Phi-3 Medium 128K Instruct", - "created": 1716508800, - "description": "Phi-3 128K Medium is a powerful 14-billion parameter model designed for advanced language understanding, reasoning, and instruction following. 
Optimized through supervised fine-tuning and preference adjustments, it excels in tasks involving common sense, mathematics, logical reasoning, and code processing.\n\nAt time of release, Phi-3 Medium demonstrated state-of-the-art performance among lightweight models. In the MMLU-Pro eval, the model even comes close to a Llama3 70B level of performance.\n\nFor 4k context length, try [Phi-3 Medium 4K](/models/microsoft/phi-3-medium-4k-instruct).", + "description": "Phi-3 128K Medium is a powerful 14-billion parameter model designed for advanced language understanding, reasoning, and instruction following.", "context_length": 128000, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Other", - "instruct_type": "phi3" - }, - "pricing": { - "prompt": "0.000001", - "completion": "0.000001", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 128000, - "max_completion_tokens": null, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "max_tokens", - "temperature", - "tool_choice", - "tools", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "microsoft/Phi-3-medium-128k-instruct" }, { "id": "openai/gpt-4o", - "canonical_slug": "openai/gpt-4o", - "hugging_face_id": null, "name": "OpenAI: GPT-4o", - "created": 1715558400, - "description": "GPT-4o (\"o\" for \"omni\") is OpenAI's latest AI model, supporting both text and image inputs with text outputs. It maintains the intelligence level of [GPT-4 Turbo](/models/openai/gpt-4-turbo) while being twice as fast and 50% more cost-effective. GPT-4o also offers improved performance in processing non-English languages and enhanced visual capabilities.\n\nFor benchmarking against other models, it was briefly called [\"im-also-a-good-gpt2-chatbot\"](https://twitter.com/LiamFedus/status/1790064963966370209)\n\n#multimodal", + "description": "GPT-4o (\"o\" for \"omni\") is OpenAI's latest AI model, supporting both text and image inputs with text outputs.", "context_length": 128000, - "architecture": { - "modality": "text+image-\u003Etext", - "input_modalities": ["text", "image", "file"], - "output_modalities": ["text"], - "tokenizer": "GPT", - "instruct_type": null - }, - "pricing": { - "prompt": "0.0000025", - "completion": "0.00001", - "request": "0", - "image": "0.003613", - "web_search": "0", - "internal_reasoning": "0", - "input_cache_read": "0.00000125" - }, - "top_provider": { - "context_length": 128000, - "max_completion_tokens": 16384, - "is_moderated": true - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "logit_bias", - "logprobs", - "max_tokens", - "presence_penalty", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_logprobs", - "top_p", - "web_search_options" - ], - "default_parameters": {} + "hugging_face_id": "" }, { "id": "openai/gpt-4o:extended", - "canonical_slug": "openai/gpt-4o", - "hugging_face_id": null, "name": "OpenAI: GPT-4o (extended)", - "created": 1715558400, - "description": "GPT-4o (\"o\" for \"omni\") is OpenAI's latest AI model, supporting both text and image inputs with text outputs. It maintains the intelligence level of [GPT-4 Turbo](/models/openai/gpt-4-turbo) while being twice as fast and 50% more cost-effective. 
GPT-4o also offers improved performance in processing non-English languages and enhanced visual capabilities.\n\nFor benchmarking against other models, it was briefly called [\"im-also-a-good-gpt2-chatbot\"](https://twitter.com/LiamFedus/status/1790064963966370209)\n\n#multimodal", + "description": "GPT-4o (\"o\" for \"omni\") is OpenAI's latest AI model, supporting both text and image inputs with text outputs.", "context_length": 128000, - "architecture": { - "modality": "text+image-\u003Etext", - "input_modalities": ["text", "image", "file"], - "output_modalities": ["text"], - "tokenizer": "GPT", - "instruct_type": null - }, - "pricing": { - "prompt": "0.000006", - "completion": "0.000018", - "request": "0", - "image": "0.007225", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 128000, - "max_completion_tokens": 64000, - "is_moderated": true - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "logit_bias", - "logprobs", - "max_tokens", - "presence_penalty", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_logprobs", - "top_p", - "web_search_options" - ], - "default_parameters": {} + "hugging_face_id": "" }, { "id": "openai/gpt-4o-2024-05-13", - "canonical_slug": "openai/gpt-4o-2024-05-13", - "hugging_face_id": null, "name": "OpenAI: GPT-4o (2024-05-13)", - "created": 1715558400, - "description": "GPT-4o (\"o\" for \"omni\") is OpenAI's latest AI model, supporting both text and image inputs with text outputs. It maintains the intelligence level of [GPT-4 Turbo](/models/openai/gpt-4-turbo) while being twice as fast and 50% more cost-effective. GPT-4o also offers improved performance in processing non-English languages and enhanced visual capabilities.\n\nFor benchmarking against other models, it was briefly called [\"im-also-a-good-gpt2-chatbot\"](https://twitter.com/LiamFedus/status/1790064963966370209)\n\n#multimodal", + "description": "GPT-4o (\"o\" for \"omni\") is OpenAI's latest AI model, supporting both text and image inputs with text outputs.", "context_length": 128000, - "architecture": { - "modality": "text+image-\u003Etext", - "input_modalities": ["text", "image", "file"], - "output_modalities": ["text"], - "tokenizer": "GPT", - "instruct_type": null - }, - "pricing": { - "prompt": "0.000005", - "completion": "0.000015", - "request": "0", - "image": "0.007225", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 128000, - "max_completion_tokens": 4096, - "is_moderated": true - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "logit_bias", - "logprobs", - "max_tokens", - "presence_penalty", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_logprobs", - "top_p", - "web_search_options" - ], - "default_parameters": {} + "hugging_face_id": "" }, { "id": "meta-llama/llama-guard-2-8b", - "canonical_slug": "meta-llama/llama-guard-2-8b", - "hugging_face_id": "meta-llama/Meta-Llama-Guard-2-8B", "name": "Meta: LlamaGuard 2 8B", - "created": 1715558400, - "description": "This safeguard model has 8B parameters and is based on the Llama 3 family. 
Just like is predecessor, [LlamaGuard 1](https://huggingface.co/meta-llama/LlamaGuard-7b), it can do both prompt and response classification.\n\nLlamaGuard 2 acts as a normal LLM would, generating text that indicates whether the given input/output is safe/unsafe. If deemed unsafe, it will also share the content categories violated.\n\nFor best results, please use raw prompt input or the `/completions` endpoint, instead of the chat API.\n\nIt has demonstrated strong performance compared to leading closed-source models in human evaluations.\n\nTo read more about the model release, [click here](https://ai.meta.com/blog/meta-llama-3/). Usage of this model is subject to [Meta's Acceptable Use Policy](https://llama.meta.com/llama3/use-policy/).", + "description": "This safeguard model has 8B parameters and is based on the Llama 3 family.", "context_length": 8192, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Llama3", - "instruct_type": "none" - }, - "pricing": { - "prompt": "0.0000002", - "completion": "0.0000002", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 8192, - "max_completion_tokens": null, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "logit_bias", - "max_tokens", - "min_p", - "presence_penalty", - "repetition_penalty", - "stop", - "temperature", - "top_k", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "meta-llama/Meta-Llama-Guard-2-8B" }, { "id": "meta-llama/llama-3-8b-instruct", - "canonical_slug": "meta-llama/llama-3-8b-instruct", - "hugging_face_id": "meta-llama/Meta-Llama-3-8B-Instruct", "name": "Meta: Llama 3 8B Instruct", - "created": 1713398400, - "description": "Meta's latest class of model (Llama 3) launched with a variety of sizes & flavors. This 8B instruct-tuned version was optimized for high quality dialogue usecases.\n\nIt has demonstrated strong performance compared to leading closed-source models in human evaluations.\n\nTo read more about the model release, [click here](https://ai.meta.com/blog/meta-llama-3/). 
Usage of this model is subject to [Meta's Acceptable Use Policy](https://llama.meta.com/llama3/use-policy/).", + "description": "Meta's latest class of model (Llama 3) launched with a variety of sizes & flavors.", "context_length": 8192, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Llama3", - "instruct_type": "llama3" - }, - "pricing": { - "prompt": "0.00000003", - "completion": "0.00000006", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 8192, - "max_completion_tokens": 16384, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "logit_bias", - "max_tokens", - "min_p", - "presence_penalty", - "repetition_penalty", - "response_format", - "seed", - "stop", - "temperature", - "tool_choice", - "tools", - "top_k", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "meta-llama/Meta-Llama-3-8B-Instruct" }, { "id": "meta-llama/llama-3-70b-instruct", - "canonical_slug": "meta-llama/llama-3-70b-instruct", - "hugging_face_id": "meta-llama/Meta-Llama-3-70B-Instruct", "name": "Meta: Llama 3 70B Instruct", - "created": 1713398400, - "description": "Meta's latest class of model (Llama 3) launched with a variety of sizes & flavors. This 70B instruct-tuned version was optimized for high quality dialogue usecases.\n\nIt has demonstrated strong performance compared to leading closed-source models in human evaluations.\n\nTo read more about the model release, [click here](https://ai.meta.com/blog/meta-llama-3/). Usage of this model is subject to [Meta's Acceptable Use Policy](https://llama.meta.com/llama3/use-policy/).", + "description": "Meta's latest class of model (Llama 3) launched with a variety of sizes & flavors.", "context_length": 8192, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Llama3", - "instruct_type": "llama3" - }, - "pricing": { - "prompt": "0.0000003", - "completion": "0.0000004", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 8192, - "max_completion_tokens": 16384, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "logit_bias", - "max_tokens", - "min_p", - "presence_penalty", - "repetition_penalty", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_k", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "meta-llama/Meta-Llama-3-70B-Instruct" }, { "id": "mistralai/mixtral-8x22b-instruct", - "canonical_slug": "mistralai/mixtral-8x22b-instruct", - "hugging_face_id": "mistralai/Mixtral-8x22B-Instruct-v0.1", "name": "Mistral: Mixtral 8x22B Instruct", - "created": 1713312000, - "description": "Mistral's official instruct fine-tuned version of [Mixtral 8x22B](/models/mistralai/mixtral-8x22b). It uses 39B active parameters out of 141B, offering unparalleled cost efficiency for its size. 
Its strengths include:\n- strong math, coding, and reasoning\n- large context length (64k)\n- fluency in English, French, Italian, German, and Spanish\n\nSee benchmarks on the launch announcement [here](https://mistral.ai/news/mixtral-8x22b/).\n#moe", + "description": "Mistral's official instruct fine-tuned version of [Mixtral 8x22B](/models/mistralai/mixtral-8x22b).", "context_length": 65536, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Mistral", - "instruct_type": "mistral" - }, - "pricing": { - "prompt": "0.000002", - "completion": "0.000006", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 65536, - "max_completion_tokens": null, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "max_tokens", - "presence_penalty", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_p" - ], - "default_parameters": { - "temperature": 0.3 - } + "hugging_face_id": "mistralai/Mixtral-8x22B-Instruct-v0.1" }, { "id": "microsoft/wizardlm-2-8x22b", - "canonical_slug": "microsoft/wizardlm-2-8x22b", - "hugging_face_id": "microsoft/WizardLM-2-8x22B", "name": "WizardLM-2 8x22B", - "created": 1713225600, - "description": "WizardLM-2 8x22B is Microsoft AI's most advanced Wizard model. It demonstrates highly competitive performance compared to leading proprietary models, and it consistently outperforms all existing state-of-the-art opensource models.\n\nIt is an instruct finetune of [Mixtral 8x22B](/models/mistralai/mixtral-8x22b).\n\nTo read more about the model release, [click here](https://wizardlm.github.io/WizardLM2/).\n\n#moe", + "description": "WizardLM-2 8x22B is Microsoft AI's most advanced Wizard model.", "context_length": 65536, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Mistral", - "instruct_type": "vicuna" - }, - "pricing": { - "prompt": "0.00000048", - "completion": "0.00000048", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 65536, - "max_completion_tokens": 16384, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "max_tokens", - "min_p", - "presence_penalty", - "repetition_penalty", - "response_format", - "seed", - "stop", - "temperature", - "top_k", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "microsoft/WizardLM-2-8x22B" }, { "id": "openai/gpt-4-turbo", - "canonical_slug": "openai/gpt-4-turbo", - "hugging_face_id": null, "name": "OpenAI: GPT-4 Turbo", - "created": 1712620800, - "description": "The latest GPT-4 Turbo model with vision capabilities. 
Vision requests can now use JSON mode and function calling.\n\nTraining data: up to December 2023.", + "description": "The latest GPT-4 Turbo model with vision capabilities.", "context_length": 128000, - "architecture": { - "modality": "text+image-\u003Etext", - "input_modalities": ["text", "image"], - "output_modalities": ["text"], - "tokenizer": "GPT", - "instruct_type": null - }, - "pricing": { - "prompt": "0.00001", - "completion": "0.00003", - "request": "0", - "image": "0.01445", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 128000, - "max_completion_tokens": 4096, - "is_moderated": true - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "logit_bias", - "logprobs", - "max_tokens", - "presence_penalty", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_logprobs", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "" }, { "id": "anthropic/claude-3-haiku", - "canonical_slug": "anthropic/claude-3-haiku", - "hugging_face_id": null, "name": "Anthropic: Claude 3 Haiku", - "created": 1710288000, - "description": "Claude 3 Haiku is Anthropic's fastest and most compact model for\nnear-instant responsiveness. Quick and accurate targeted performance.\n\nSee the launch announcement and benchmark results [here](https://www.anthropic.com/news/claude-3-haiku)\n\n#multimodal", + "description": "Claude 3 Haiku is Anthropic's fastest and most compact model for\nnear-instant responsiveness.", "context_length": 200000, - "architecture": { - "modality": "text+image-\u003Etext", - "input_modalities": ["text", "image"], - "output_modalities": ["text"], - "tokenizer": "Claude", - "instruct_type": null - }, - "pricing": { - "prompt": "0.00000025", - "completion": "0.00000125", - "request": "0", - "image": "0.0004", - "web_search": "0", - "internal_reasoning": "0", - "input_cache_read": "0.00000003", - "input_cache_write": "0.0000003" - }, - "top_provider": { - "context_length": 200000, - "max_completion_tokens": 4096, - "is_moderated": true - }, - "per_request_limits": null, - "supported_parameters": [ - "max_tokens", - "stop", - "temperature", - "tool_choice", - "tools", - "top_k", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "" }, { "id": "anthropic/claude-3-opus", - "canonical_slug": "anthropic/claude-3-opus", - "hugging_face_id": null, "name": "Anthropic: Claude 3 Opus", - "created": 1709596800, - "description": "Claude 3 Opus is Anthropic's most powerful model for highly complex tasks. 
It boasts top-level performance, intelligence, fluency, and understanding.\n\nSee the launch announcement and benchmark results [here](https://www.anthropic.com/news/claude-3-family)\n\n#multimodal", + "description": "Claude 3 Opus is Anthropic's most powerful model for highly complex tasks.", "context_length": 200000, - "architecture": { - "modality": "text+image-\u003Etext", - "input_modalities": ["text", "image"], - "output_modalities": ["text"], - "tokenizer": "Claude", - "instruct_type": null - }, - "pricing": { - "prompt": "0.000015", - "completion": "0.000075", - "request": "0", - "image": "0.024", - "web_search": "0", - "internal_reasoning": "0", - "input_cache_read": "0.0000015", - "input_cache_write": "0.00001875" - }, - "top_provider": { - "context_length": 200000, - "max_completion_tokens": 4096, - "is_moderated": true - }, - "per_request_limits": null, - "supported_parameters": [ - "max_tokens", - "stop", - "temperature", - "tool_choice", - "tools", - "top_k", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "" }, { "id": "mistralai/mistral-large", - "canonical_slug": "mistralai/mistral-large", - "hugging_face_id": null, "name": "Mistral Large", - "created": 1708905600, - "description": "This is Mistral AI's flagship model, Mistral Large 2 (version `mistral-large-2407`). It's a proprietary weights-available model and excels at reasoning, code, JSON, chat, and more. Read the launch announcement [here](https://mistral.ai/news/mistral-large-2407/).\n\nIt supports dozens of languages including French, German, Spanish, Italian, Portuguese, Arabic, Hindi, Russian, Chinese, Japanese, and Korean, along with 80+ coding languages including Python, Java, C, C++, JavaScript, and Bash. Its long context window allows precise information recall from large documents.", + "description": "This is Mistral AI's flagship model, Mistral Large 2 (version `mistral-large-2407`).", "context_length": 128000, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Mistral", - "instruct_type": null - }, - "pricing": { - "prompt": "0.000002", - "completion": "0.000006", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 128000, - "max_completion_tokens": null, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "max_tokens", - "presence_penalty", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_p" - ], - "default_parameters": { - "temperature": 0.3 - } + "hugging_face_id": "" }, { "id": "openai/gpt-4-turbo-preview", - "canonical_slug": "openai/gpt-4-turbo-preview", - "hugging_face_id": null, "name": "OpenAI: GPT-4 Turbo Preview", - "created": 1706140800, - "description": "The preview GPT-4 model with improved instruction following, JSON mode, reproducible outputs, parallel function calling, and more. 
Training data: up to Dec 2023.\n\n**Note:** heavily rate limited by OpenAI while in preview.", + "description": "The preview GPT-4 model with improved instruction following, JSON mode, reproducible outputs, parallel function calling, and more.", "context_length": 128000, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "GPT", - "instruct_type": null - }, - "pricing": { - "prompt": "0.00001", - "completion": "0.00003", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 128000, - "max_completion_tokens": 4096, - "is_moderated": true - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "logit_bias", - "logprobs", - "max_tokens", - "presence_penalty", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_logprobs", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "" }, { "id": "openai/gpt-3.5-turbo-0613", - "canonical_slug": "openai/gpt-3.5-turbo-0613", - "hugging_face_id": null, "name": "OpenAI: GPT-3.5 Turbo (older v0613)", - "created": 1706140800, - "description": "GPT-3.5 Turbo is OpenAI's fastest model. It can understand and generate natural language or code, and is optimized for chat and traditional completion tasks.\n\nTraining data up to Sep 2021.", + "description": "GPT-3.5 Turbo is OpenAI's fastest model.", "context_length": 4095, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "GPT", - "instruct_type": null - }, - "pricing": { - "prompt": "0.000001", - "completion": "0.000002", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 4095, - "max_completion_tokens": 4096, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "logit_bias", - "logprobs", - "max_tokens", - "presence_penalty", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_logprobs", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "" }, { "id": "mistralai/mistral-small", - "canonical_slug": "mistralai/mistral-small", - "hugging_face_id": null, "name": "Mistral Small", - "created": 1704844800, - "description": "With 22 billion parameters, Mistral Small v24.09 offers a convenient mid-point between (Mistral NeMo 12B)[/mistralai/mistral-nemo] and (Mistral Large 2)[/mistralai/mistral-large], providing a cost-effective solution that can be deployed across various platforms and environments. 
It has better reasoning, exhibits more capabilities, can produce and reason about code, and is multiligual, supporting English, French, German, Italian, and Spanish.", + "description": "With 22 billion parameters, Mistral Small v24.09 offers a convenient mid-point between [Mistral NeMo 12B](/mistralai/mistral-nemo) and [Mistral Large 2](/mistralai/mistral-large), providing a cost-effective solution that can be deployed across various platforms and environments.", "context_length": 32768, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Mistral", - "instruct_type": null - }, - "pricing": { - "prompt": "0.0000002", - "completion": "0.0000006", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 32768, - "max_completion_tokens": null, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "max_tokens", - "presence_penalty", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_p" - ], - "default_parameters": { - "temperature": 0.3 - } + "hugging_face_id": "" }, { "id": "mistralai/mistral-tiny", - "canonical_slug": "mistralai/mistral-tiny", - "hugging_face_id": null, "name": "Mistral Tiny", - "created": 1704844800, - "description": "Note: This model is being deprecated. Recommended replacement is the newer [Ministral 8B](/mistral/ministral-8b)\n\nThis model is currently powered by Mistral-7B-v0.2, and incorporates a \"better\" fine-tuning than [Mistral 7B](/models/mistralai/mistral-7b-instruct-v0.1), inspired by community work. It's best used for large batch processing tasks where cost is a significant factor but reasoning capabilities are not crucial.", + "description": "Note: This model is being deprecated.", "context_length": 32768, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Mistral", - "instruct_type": null - }, - "pricing": { - "prompt": "0.00000025", - "completion": "0.00000025", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 32768, - "max_completion_tokens": null, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "max_tokens", - "presence_penalty", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_p" - ], - "default_parameters": { - "temperature": 0.3 - } + "hugging_face_id": "" }, { "id": "mistralai/mistral-7b-instruct-v0.2", - "canonical_slug": "mistralai/mistral-7b-instruct-v0.2", - "hugging_face_id": "mistralai/Mistral-7B-Instruct-v0.2", "name": "Mistral: Mistral 7B Instruct v0.2", - "created": 1703721600, - "description": "A high-performing, industry-standard 7.3B parameter model, with optimizations for speed and context length.\n\nAn improved version of [Mistral 7B Instruct](/modelsmistralai/mistral-7b-instruct-v0.1), with the following changes:\n\n- 32k context window (vs 8k context in v0.1)\n- Rope-theta = 1e6\n- No Sliding-Window Attention", + "description": "A high-performing, industry-standard 7.3B parameter model, with optimizations for speed and context length.", "context_length": 32768, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Mistral", - "instruct_type": "mistral" - }, - "pricing": { - "prompt":
"0.0000002", - "completion": "0.0000002", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 32768, - "max_completion_tokens": null, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "logit_bias", - "max_tokens", - "min_p", - "presence_penalty", - "repetition_penalty", - "stop", - "temperature", - "top_k", - "top_p" - ], - "default_parameters": { - "temperature": 0.3 - } + "hugging_face_id": "mistralai/Mistral-7B-Instruct-v0.2" }, { "id": "mistralai/mixtral-8x7b-instruct", - "canonical_slug": "mistralai/mixtral-8x7b-instruct", - "hugging_face_id": "mistralai/Mixtral-8x7B-Instruct-v0.1", "name": "Mistral: Mixtral 8x7B Instruct", - "created": 1702166400, - "description": "Mixtral 8x7B Instruct is a pretrained generative Sparse Mixture of Experts, by Mistral AI, for chat and instruction use. Incorporates 8 experts (feed-forward networks) for a total of 47 billion parameters.\n\nInstruct model fine-tuned by Mistral. #moe", + "description": "Mixtral 8x7B Instruct is a pretrained generative Sparse Mixture of Experts, by Mistral AI, for chat and instruction use.", "context_length": 32768, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Mistral", - "instruct_type": "mistral" - }, - "pricing": { - "prompt": "0.00000054", - "completion": "0.00000054", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 32768, - "max_completion_tokens": 16384, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "logit_bias", - "max_tokens", - "min_p", - "presence_penalty", - "repetition_penalty", - "response_format", - "seed", - "stop", - "temperature", - "tool_choice", - "tools", - "top_k", - "top_p" - ], - "default_parameters": { - "temperature": 0.3 - } + "hugging_face_id": "mistralai/Mixtral-8x7B-Instruct-v0.1" }, { "id": "neversleep/noromaid-20b", - "canonical_slug": "neversleep/noromaid-20b", - "hugging_face_id": "NeverSleep/Noromaid-20b-v0.1.1", "name": "Noromaid 20B", - "created": 1700956800, - "description": "A collab between IkariDev and Undi. 
This merge is suitable for RP, ERP, and general knowledge.\n\n#merge #uncensored", + "description": "A collab between IkariDev and Undi.", "context_length": 4096, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Llama2", - "instruct_type": "alpaca" - }, - "pricing": { - "prompt": "0.000001", - "completion": "0.00000175", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 4096, - "max_completion_tokens": null, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "logit_bias", - "logprobs", - "max_tokens", - "min_p", - "presence_penalty", - "repetition_penalty", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "top_a", - "top_k", - "top_logprobs", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "NeverSleep/Noromaid-20b-v0.1.1" }, { "id": "alpindale/goliath-120b", - "canonical_slug": "alpindale/goliath-120b", - "hugging_face_id": "alpindale/goliath-120b", "name": "Goliath 120B", - "created": 1699574400, - "description": "A large LLM created by combining two fine-tuned Llama 70B models into one 120B model. Combines Xwin and Euryale.\n\nCredits to\n- [@chargoddard](https://huggingface.co/chargoddard) for developing the framework used to merge the model - [mergekit](https://github.com/cg123/mergekit).\n- [@Undi95](https://huggingface.co/Undi95) for helping with the merge ratios.\n\n#merge", + "description": "A large LLM created by combining two fine-tuned Llama 70B models into one 120B model.", "context_length": 6144, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Llama2", - "instruct_type": "airoboros" - }, - "pricing": { - "prompt": "0.000006", - "completion": "0.000008", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 6144, - "max_completion_tokens": 1024, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "logit_bias", - "logprobs", - "max_tokens", - "min_p", - "presence_penalty", - "repetition_penalty", - "response_format", - "seed", - "stop", - "temperature", - "top_a", - "top_k", - "top_logprobs", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "alpindale/goliath-120b" }, { "id": "openrouter/auto", - "canonical_slug": "openrouter/auto", - "hugging_face_id": null, "name": "Auto Router", - "created": 1699401600, - "description": "Your prompt will be processed by a meta-model and routed to one of dozens of models (see below), optimizing for the best possible output.\n\nTo see which model was used, visit [Activity](/activity), or read the `model` attribute of the response. Your response will be priced at the same rate as the routed model.\n\nThe meta-model is powered by [Not Diamond](https://docs.notdiamond.ai/docs/how-not-diamond-works). 
Learn more in our [docs](/docs/model-routing).\n\nRequests will be routed to the following models:\n- [openai/gpt-5](/openai/gpt-5)\n- [openai/gpt-5-mini](/openai/gpt-5-mini)\n- [openai/gpt-5-nano](/openai/gpt-5-nano)\n- [openai/gpt-4.1-nano](/openai/gpt-4.1-nano)\n- [openai/gpt-4.1](/openai/gpt-4.1)\n- [openai/gpt-4.1-mini](/openai/gpt-4.1-mini)\n- [openai/gpt-4.1](/openai/gpt-4.1)\n- [openai/gpt-4o-mini](/openai/gpt-4o-mini)\n- [openai/chatgpt-4o-latest](/openai/chatgpt-4o-latest)\n- [anthropic/claude-3.5-haiku](/anthropic/claude-3.5-haiku)\n- [anthropic/claude-opus-4-1](/anthropic/claude-opus-4-1)\n- [anthropic/claude-sonnet-4-0](/anthropic/claude-sonnet-4-0)\n- [anthropic/claude-3-7-sonnet-latest](/anthropic/claude-3-7-sonnet-latest)\n- [google/gemini-2.5-pro](/google/gemini-2.5-pro)\n- [google/gemini-2.5-flash](/google/gemini-2.5-flash)\n- [mistral/mistral-large-latest](/mistral/mistral-large-latest)\n- [mistral/mistral-medium-latest](/mistral/mistral-medium-latest)\n- [mistral/mistral-small-latest](/mistral/mistral-small-latest)\n- [mistralai/mistral-nemo](/mistralai/mistral-nemo)\n- [x-ai/grok-3](/x-ai/grok-3)\n- [x-ai/grok-3-mini](/x-ai/grok-3-mini)\n- [x-ai/grok-4](/x-ai/grok-4)\n- [deepseek/deepseek-r1](/deepseek/deepseek-r1)\n- [meta-llama/llama-3.1-70b-instruct](/meta-llama/llama-3.1-70b-instruct)\n- [meta-llama/llama-3.1-405b-instruct](/meta-llama/llama-3.1-405b-instruct)\n- [mistralai/mixtral-8x22b-instruct](/mistralai/mixtral-8x22b-instruct)\n- [perplexity/sonar](/perplexity/sonar)\n- [cohere/command-r-plus](/cohere/command-r-plus)\n- [cohere/command-r](/cohere/command-r)", + "description": "Your prompt will be processed by a meta-model and routed to one of dozens of models (see below), optimizing for the best possible output.", "context_length": 2000000, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Router", - "instruct_type": null - }, - "pricing": { - "prompt": "-1", - "completion": "-1" - }, - "top_provider": { - "context_length": null, - "max_completion_tokens": null, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [], - "default_parameters": {} + "hugging_face_id": "" }, { "id": "openai/gpt-4-1106-preview", - "canonical_slug": "openai/gpt-4-1106-preview", - "hugging_face_id": null, "name": "OpenAI: GPT-4 Turbo (older v1106)", - "created": 1699228800, - "description": "The latest GPT-4 Turbo model with vision capabilities. 
Vision requests can now use JSON mode and function calling.\n\nTraining data: up to April 2023.", + "description": "The latest GPT-4 Turbo model with vision capabilities.", "context_length": 128000, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "GPT", - "instruct_type": null - }, - "pricing": { - "prompt": "0.00001", - "completion": "0.00003", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 128000, - "max_completion_tokens": 4096, - "is_moderated": true - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "logit_bias", - "logprobs", - "max_tokens", - "presence_penalty", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_logprobs", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "" }, { "id": "mistralai/mistral-7b-instruct-v0.1", - "canonical_slug": "mistralai/mistral-7b-instruct-v0.1", - "hugging_face_id": "mistralai/Mistral-7B-Instruct-v0.1", "name": "Mistral: Mistral 7B Instruct v0.1", - "created": 1695859200, "description": "A 7.3B parameter model that outperforms Llama 2 13B on all benchmarks, with optimizations for speed and context length.", "context_length": 2824, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Mistral", - "instruct_type": "mistral" - }, - "pricing": { - "prompt": "0.00000011", - "completion": "0.00000019", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 2824, - "max_completion_tokens": null, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "logit_bias", - "max_tokens", - "min_p", - "presence_penalty", - "repetition_penalty", - "seed", - "stop", - "temperature", - "tool_choice", - "tools", - "top_k", - "top_p" - ], - "default_parameters": { - "temperature": 0.3 - } + "hugging_face_id": "mistralai/Mistral-7B-Instruct-v0.1" }, { "id": "openai/gpt-3.5-turbo-instruct", - "canonical_slug": "openai/gpt-3.5-turbo-instruct", - "hugging_face_id": null, "name": "OpenAI: GPT-3.5 Turbo Instruct", - "created": 1695859200, - "description": "This model is a variant of GPT-3.5 Turbo tuned for instructional prompts and omitting chat-related optimizations. 
Training data: up to Sep 2021.", + "description": "This model is a variant of GPT-3.5 Turbo tuned for instructional prompts and omitting chat-related optimizations.", "context_length": 4095, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "GPT", - "instruct_type": "chatml" - }, - "pricing": { - "prompt": "0.0000015", - "completion": "0.000002", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 4095, - "max_completion_tokens": 4096, - "is_moderated": true - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "logit_bias", - "logprobs", - "max_tokens", - "presence_penalty", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "top_logprobs", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "" }, { "id": "openai/gpt-3.5-turbo-16k", - "canonical_slug": "openai/gpt-3.5-turbo-16k", - "hugging_face_id": null, "name": "OpenAI: GPT-3.5 Turbo 16k", - "created": 1693180800, - "description": "This model offers four times the context length of gpt-3.5-turbo, allowing it to support approximately 20 pages of text in a single request at a higher cost. Training data: up to Sep 2021.", + "description": "This model offers four times the context length of gpt-3.5-turbo, allowing it to support approximately 20 pages of text in a single request at a higher cost.", "context_length": 16385, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "GPT", - "instruct_type": null - }, - "pricing": { - "prompt": "0.000003", - "completion": "0.000004", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 16385, - "max_completion_tokens": 4096, - "is_moderated": true - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "logit_bias", - "logprobs", - "max_tokens", - "presence_penalty", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_logprobs", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "" }, { "id": "mancer/weaver", - "canonical_slug": "mancer/weaver", - "hugging_face_id": null, "name": "Mancer: Weaver (alpha)", - "created": 1690934400, - "description": "An attempt to recreate Claude-style verbosity, but don't expect the same level of coherence or memory. 
Meant for use in roleplay/narrative situations.", + "description": "An attempt to recreate Claude-style verbosity, but don't expect the same level of coherence or memory.", "context_length": 8000, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Llama2", - "instruct_type": "alpaca" - }, - "pricing": { - "prompt": "0.000001125", - "completion": "0.000001125", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 8000, - "max_completion_tokens": 2000, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "logit_bias", - "logprobs", - "max_tokens", - "min_p", - "presence_penalty", - "repetition_penalty", - "response_format", - "seed", - "stop", - "temperature", - "top_a", - "top_k", - "top_logprobs", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "" }, { "id": "undi95/remm-slerp-l2-13b", - "canonical_slug": "undi95/remm-slerp-l2-13b", - "hugging_face_id": "Undi95/ReMM-SLERP-L2-13B", "name": "ReMM SLERP 13B", - "created": 1689984000, - "description": "A recreation trial of the original MythoMax-L2-B13 but with updated models. #merge", + "description": "A recreation trial of the original MythoMax-L2-B13 but with updated models.", "context_length": 6144, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Llama2", - "instruct_type": "alpaca" - }, - "pricing": { - "prompt": "0.00000045", - "completion": "0.00000065", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 6144, - "max_completion_tokens": null, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "logit_bias", - "logprobs", - "max_tokens", - "min_p", - "presence_penalty", - "repetition_penalty", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "top_a", - "top_k", - "top_logprobs", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "Undi95/ReMM-SLERP-L2-13B" }, { "id": "gryphe/mythomax-l2-13b", - "canonical_slug": "gryphe/mythomax-l2-13b", - "hugging_face_id": "Gryphe/MythoMax-L2-13b", "name": "MythoMax 13B", - "created": 1688256000, - "description": "One of the highest performing and most popular fine-tunes of Llama 2 13B, with rich descriptions and roleplay. 
#merge", + "description": "One of the highest performing and most popular fine-tunes of Llama 2 13B, with rich descriptions and roleplay.", "context_length": 4096, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "Llama2", - "instruct_type": "alpaca" - }, - "pricing": { - "prompt": "0.00000006", - "completion": "0.00000006", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 4096, - "max_completion_tokens": null, - "is_moderated": false - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "logit_bias", - "logprobs", - "max_tokens", - "min_p", - "presence_penalty", - "repetition_penalty", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "top_a", - "top_k", - "top_logprobs", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "Gryphe/MythoMax-L2-13b" }, { "id": "openai/gpt-4-0314", - "canonical_slug": "openai/gpt-4-0314", - "hugging_face_id": null, "name": "OpenAI: GPT-4 (older v0314)", - "created": 1685232000, - "description": "GPT-4-0314 is the first version of GPT-4 released, with a context length of 8,192 tokens, and was supported until June 14. Training data: up to Sep 2021.", + "description": "GPT-4-0314 is the first version of GPT-4 released, with a context length of 8,192 tokens, and was supported until June 14.", "context_length": 8191, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "GPT", - "instruct_type": null - }, - "pricing": { - "prompt": "0.00003", - "completion": "0.00006", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 8191, - "max_completion_tokens": 4096, - "is_moderated": true - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "logit_bias", - "logprobs", - "max_tokens", - "presence_penalty", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_logprobs", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "" }, { "id": "openai/gpt-4", - "canonical_slug": "openai/gpt-4", - "hugging_face_id": null, "name": "OpenAI: GPT-4", - "created": 1685232000, - "description": "OpenAI's flagship model, GPT-4 is a large-scale multimodal language model capable of solving difficult problems with greater accuracy than previous models due to its broader general knowledge and advanced reasoning capabilities. 
Training data: up to Sep 2021.", + "description": "OpenAI's flagship model, GPT-4 is a large-scale multimodal language model capable of solving difficult problems with greater accuracy than previous models due to its broader general knowledge and advanced reasoning capabilities.", "context_length": 8191, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "GPT", - "instruct_type": null - }, - "pricing": { - "prompt": "0.00003", - "completion": "0.00006", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 8191, - "max_completion_tokens": 4096, - "is_moderated": true - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "logit_bias", - "logprobs", - "max_tokens", - "presence_penalty", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_logprobs", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "" }, { "id": "openai/gpt-3.5-turbo", - "canonical_slug": "openai/gpt-3.5-turbo", - "hugging_face_id": null, "name": "OpenAI: GPT-3.5 Turbo", - "created": 1685232000, - "description": "GPT-3.5 Turbo is OpenAI's fastest model. It can understand and generate natural language or code, and is optimized for chat and traditional completion tasks.\n\nTraining data up to Sep 2021.", + "description": "GPT-3.5 Turbo is OpenAI's fastest model.", "context_length": 16385, - "architecture": { - "modality": "text-\u003Etext", - "input_modalities": ["text"], - "output_modalities": ["text"], - "tokenizer": "GPT", - "instruct_type": null - }, - "pricing": { - "prompt": "0.0000005", - "completion": "0.0000015", - "request": "0", - "image": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 16385, - "max_completion_tokens": 4096, - "is_moderated": true - }, - "per_request_limits": null, - "supported_parameters": [ - "frequency_penalty", - "logit_bias", - "logprobs", - "max_tokens", - "presence_penalty", - "response_format", - "seed", - "stop", - "structured_outputs", - "temperature", - "tool_choice", - "tools", - "top_logprobs", - "top_p" - ], - "default_parameters": {} + "hugging_face_id": "" } ] } From f054bc1ab38870a1dadb5e4f54d5e03265cd0a44 Mon Sep 17 00:00:00 2001 From: Parthasarathy Date: Wed, 26 Nov 2025 23:04:07 +0530 Subject: [PATCH 7/8] feat: add initialization function for OpenRouter models - Replaced the import of openRouterModelsList with getOpenRouterModelsList. - Added a new function `initializeOpenRouterModels` to fetch and initialize OpenRouter models from the API. - Updated the providers object with the fetched models upon successful retrieval. - Added error handling to reset the models list in case of a fetch failure.
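For reference, here is a minimal sketch of what the new `initializeOpenRouterModels` could look like inside providers.ts, based on the bullets above. It is an illustration only, not the exact hunk: the `packages` field on the OpenRouter provider entry is an assumption, while `providers` and `getOpenRouterModelsList` are the real identifiers touched by this patch.

import { ModelPackage } from "./models";
import { getOpenRouterModelsList } from "./openRouterModel";

// Sketch: fetch the live model list and swap it into the provider config.
// On failure, reset to an empty list so the Add Model form still renders.
// `providers` is the object already exported from providers.ts; the
// `packages` field name is assumed here for illustration.
export async function initializeOpenRouterModels(): Promise<void> {
  const openrouter = providers["openrouter"];
  if (!openrouter) return;
  try {
    const models: ModelPackage[] = await getOpenRouterModelsList();
    openrouter.packages = models;
  } catch (error) {
    openrouter.packages = [];
    console.error("Failed to initialize OpenRouter models:", error);
  }
}

The AddModelForm.tsx hunk below invokes this as `void initializeOpenRouterModels();`, kicking off the fetch without blocking the initial render.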
--- gui/src/forms/AddModelForm.tsx | 4 + .../AddNewModel/configs/openRouterModel.ts | 61 +- .../AddNewModel/configs/openRouterModels.json | 2412 ----------------- .../pages/AddNewModel/configs/providers.ts | 22 +- 4 files changed, 71 insertions(+), 2428 deletions(-) delete mode 100644 gui/src/pages/AddNewModel/configs/openRouterModels.json diff --git a/gui/src/forms/AddModelForm.tsx b/gui/src/forms/AddModelForm.tsx index 3d24816c4d0..239e7e78488 100644 --- a/gui/src/forms/AddModelForm.tsx +++ b/gui/src/forms/AddModelForm.tsx @@ -9,6 +9,7 @@ import { IdeMessengerContext } from "../context/IdeMessenger"; import { completionParamsInputs } from "../pages/AddNewModel/configs/completionParamsInputs"; import { DisplayInfo } from "../pages/AddNewModel/configs/models"; import { + initializeOpenRouterModels, ProviderInfo, providers, } from "../pages/AddNewModel/configs/providers"; @@ -40,6 +41,9 @@ export function AddModelForm({ const formMethods = useForm(); const ideMessenger = useContext(IdeMessengerContext); + // Initialize OpenRouter models from API + void initializeOpenRouterModels(); + const popularProviderTitles = [ providers["openai"]?.title || "", providers["anthropic"]?.title || "", diff --git a/gui/src/pages/AddNewModel/configs/openRouterModel.ts b/gui/src/pages/AddNewModel/configs/openRouterModel.ts index 2c5ba5b4ac2..f0d7e745a49 100644 --- a/gui/src/pages/AddNewModel/configs/openRouterModel.ts +++ b/gui/src/pages/AddNewModel/configs/openRouterModel.ts @@ -1,5 +1,4 @@ import { ModelPackage } from "./models"; -import openRouterModelsData from "./openRouterModels.json"; interface OpenRouterModel { id: string; @@ -30,21 +29,45 @@ function convertOpenRouterModelToPackage(model: OpenRouterModel): ModelPackage { } /** - * Generate ModelPackage objects from OpenRouter models JSON + * Fetch OpenRouter models from the API */ -export function generateOpenRouterModels(): { - [key: string]: ModelPackage; -} { - const models: { [key: string]: ModelPackage } = {}; +async function fetchOpenRouterModelsFromAPI(): Promise<OpenRouterModel[]> { + const OPENROUTER_API_URL = "https://openrouter.ai/api/v1/models"; + + try { + const response = await fetch(OPENROUTER_API_URL); + + if (!response.ok) { + throw new Error( + `Failed to fetch OpenRouter models: ${response.status} ${response.statusText}`, + ); + } - const data = openRouterModelsData as { data: OpenRouterModel[] }; + const data = await response.json(); - if (!data.data || !Array.isArray(data.data)) { - console.warn("Invalid OpenRouter models data structure"); - return models; + if (!data.data || !Array.isArray(data.data)) { + console.warn("Invalid OpenRouter models data structure from API"); + return []; + } + + return data.data; + } catch (error) { + console.error("Error fetching OpenRouter models from API:", error); + return []; } +} - data.data.forEach((model: OpenRouterModel) => { +/** + * Generate ModelPackage objects from OpenRouter models API + */ +async function generateOpenRouterModels(): Promise<{ + [key: string]: ModelPackage; +}> { + const models: { [key: string]: ModelPackage } = {}; + + const apiModels = await fetchOpenRouterModelsFromAPI(); + + apiModels.forEach((model: OpenRouterModel) => { if (!model.id || !model.name) { console.warn("Skipping model with missing id or name", model); return; } @@ -64,11 +87,19 @@ export function generateOpenRouterModels(): { } /** - * Export all OpenRouter models as a pre-generated object + * Export a function to fetch all OpenRouter models + * This returns a promise since we're now fetching from the API */ -export const
openRouterModels = generateOpenRouterModels(); +export async function getOpenRouterModels(): Promise<{ + [key: string]: ModelPackage; +}> { + return generateOpenRouterModels(); +} /** - * Export OpenRouter models as an array for use in provider packages + * Export a function to get OpenRouter models as an array */ -export const openRouterModelsList = Object.values(openRouterModels); +export async function getOpenRouterModelsList(): Promise<ModelPackage[]> { + const models = await getOpenRouterModels(); + return Object.values(models); +} diff --git a/gui/src/pages/AddNewModel/configs/openRouterModels.json b/gui/src/pages/AddNewModel/configs/openRouterModels.json deleted file mode 100644 index e4c6fe0e63e..00000000000 --- a/gui/src/pages/AddNewModel/configs/openRouterModels.json +++ /dev/null @@ -1,2412 +0,0 @@ -{ - "data": [ - { - "id": "openai/gpt-5.1", - "name": "OpenAI: GPT-5.1", - "description": "GPT-5.1 is the latest frontier-grade model in the GPT-5 series, offering stronger general-purpose reasoning, improved instruction adherence, and a more natural conversational style compared to GPT-5.", - "context_length": 400000, - "hugging_face_id": "" - }, - { - "id": "openai/gpt-5.1-chat", - "name": "OpenAI: GPT-5.1 Chat", - "description": "GPT-5.1 Chat (AKA Instant is the fast, lightweight member of the 5.1 family, optimized for low-latency chat while retaining strong general intelligence.", - "context_length": 128000, - "hugging_face_id": "" - }, - { - "id": "openai/gpt-5.1-codex", - "name": "OpenAI: GPT-5.1-Codex", - "description": "GPT-5.1-Codex is a specialized version of GPT-5.1 optimized for software engineering and coding workflows.", - "context_length": 400000, - "hugging_face_id": "" - }, - { - "id": "openai/gpt-5.1-codex-mini", - "name": "OpenAI: GPT-5.1-Codex-Mini", - "description": "GPT-5.1-Codex-Mini is a smaller and faster version of GPT-5.1-Codex.", - "context_length": 400000, - "hugging_face_id": "" - }, - { - "id": "kwaipilot/kat-coder-pro:free", - "name": "Kwaipilot: KAT-Coder-Pro V1 (free)", - "description": "KAT-Coder-Pro V1 is KwaiKAT's most advanced agentic coding model in the KAT-Coder series.", - "context_length": 256000, - "hugging_face_id": "" - }, - { - "id": "moonshotai/kimi-linear-48b-a3b-instruct", - "name": "MoonshotAI: Kimi Linear 48B A3B Instruct", - "description": "Kimi Linear is a hybrid linear attention architecture that outperforms traditional full attention methods across various contexts, including short, long, and reinforcement learning (RL) scaling.", - "context_length": 1048576, - "hugging_face_id": "moonshotai/Kimi-Linear-48B-A3B-Instruct" - }, - { - "id": "moonshotai/kimi-k2-thinking", - "name": "MoonshotAI: Kimi K2 Thinking", - "description": "Kimi K2 Thinking is Moonshot AI’s most advanced open reasoning model to date, extending the K2 series into agentic, long-horizon reasoning.", - "context_length": 262144, - "hugging_face_id": "moonshotai/Kimi-K2-Thinking" - }, - { - "id": "amazon/nova-premier-v1", - "name": "Amazon: Nova Premier 1.0", - "description": "Amazon Nova Premier is the most capable of Amazon’s multimodal models for complex reasoning tasks and for use as the best teacher for distilling custom models.", - "context_length": 1000000, - "hugging_face_id": "" - }, - { - "id": "perplexity/sonar-pro-search", - "name": "Perplexity: Sonar Pro Search", - "description": "Exclusively available on the OpenRouter API, Sonar Pro's new Pro Search mode is Perplexity's most advanced agentic search system.", - "context_length": 200000, - "hugging_face_id": "" - }, -
{ - "id": "mistralai/voxtral-small-24b-2507", - "name": "Mistral: Voxtral Small 24B 2507", - "description": "Voxtral Small is an enhancement of Mistral Small 3, incorporating state-of-the-art audio input capabilities while retaining best-in-class text performance.", - "context_length": 32000, - "hugging_face_id": "mistralai/Voxtral-Small-24B-2507" - }, - { - "id": "openai/gpt-oss-safeguard-20b", - "name": "OpenAI: gpt-oss-safeguard-20b", - "description": "gpt-oss-safeguard-20b is a safety reasoning model from OpenAI built upon gpt-oss-20b.", - "context_length": 131072, - "hugging_face_id": "openai/gpt-oss-safeguard-20b" - }, - { - "id": "nvidia/nemotron-nano-12b-v2-vl:free", - "name": "NVIDIA: Nemotron Nano 12B 2 VL (free)", - "description": "NVIDIA Nemotron Nano 2 VL is a 12-billion-parameter open multimodal reasoning model designed for video understanding and document intelligence.", - "context_length": 128000, - "hugging_face_id": "nvidia/NVIDIA-Nemotron-Nano-12B-v2-VL-BF16" - }, - { - "id": "nvidia/nemotron-nano-12b-v2-vl", - "name": "NVIDIA: Nemotron Nano 12B 2 VL", - "description": "NVIDIA Nemotron Nano 2 VL is a 12-billion-parameter open multimodal reasoning model designed for video understanding and document intelligence.", - "context_length": 131072, - "hugging_face_id": "nvidia/NVIDIA-Nemotron-Nano-12B-v2-VL-BF16" - }, - { - "id": "minimax/minimax-m2", - "name": "MiniMax: MiniMax M2", - "description": "MiniMax-M2 is a compact, high-efficiency large language model optimized for end-to-end coding and agentic workflows.", - "context_length": 204800, - "hugging_face_id": "MiniMaxAI/MiniMax-M2" - }, - { - "id": "liquid/lfm2-8b-a1b", - "name": "LiquidAI/LFM2-8B-A1B", - "description": "Model created via inbox interface.", - "context_length": 32768, - "hugging_face_id": "LiquidAI/LFM2-8B-A1B" - }, - { - "id": "liquid/lfm-2.2-6b", - "name": "LiquidAI/LFM2-2.6B", - "description": "LFM2 is a new generation of hybrid models developed by Liquid AI, specifically designed for edge AI and on-device deployment.", - "context_length": 32768, - "hugging_face_id": "LiquidAI/LFM2-2.6B" - }, - { - "id": "ibm-granite/granite-4.0-h-micro", - "name": "IBM: Granite 4.0 Micro", - "description": "Granite-4.0-H-Micro is a 3B parameter from the Granite 4 family of models.", - "context_length": 131000, - "hugging_face_id": "ibm-granite/granite-4.0-h-micro" - }, - { - "id": "deepcogito/cogito-v2-preview-llama-405b", - "name": "Deep Cogito: Cogito V2 Preview Llama 405B", - "description": "Cogito v2 405B is a dense hybrid reasoning model that combines direct answering capabilities with advanced self-reflection.", - "context_length": 32768, - "hugging_face_id": "deepcogito/cogito-v2-preview-llama-405B" - }, - { - "id": "openai/gpt-5-image-mini", - "name": "OpenAI: GPT-5 Image Mini", - "description": "GPT-5 Image Mini combines OpenAI's advanced language capabilities, powered by [GPT-5 Mini](https://openrouter.ai/openai/gpt-5-mini), with GPT Image 1 Mini for efficient image generation.", - "context_length": 400000, - "hugging_face_id": "" - }, - { - "id": "anthropic/claude-haiku-4.5", - "name": "Anthropic: Claude Haiku 4.5", - "description": "Claude Haiku 4.5 is Anthropic’s fastest and most efficient model, delivering near-frontier intelligence at a fraction of the cost and latency of larger Claude models.", - "context_length": 200000, - "hugging_face_id": "" - }, - { - "id": "qwen/qwen3-vl-8b-thinking", - "name": "Qwen: Qwen3 VL 8B Thinking", - "description": "Qwen3-VL-8B-Thinking is the reasoning-optimized variant 
of the Qwen3-VL-8B multimodal model, designed for advanced visual and textual reasoning across complex scenes, documents, and temporal sequences.",
-    "context_length": 256000,
-    "hugging_face_id": "Qwen/Qwen3-VL-8B-Thinking"
-  },
-  {
-    "id": "qwen/qwen3-vl-8b-instruct",
-    "name": "Qwen: Qwen3 VL 8B Instruct",
-    "description": "Qwen3-VL-8B-Instruct is a multimodal vision-language model from the Qwen3-VL series, built for high-fidelity understanding and reasoning across text, images, and video.",
-    "context_length": 131072,
-    "hugging_face_id": "Qwen/Qwen3-VL-8B-Instruct"
-  },
-  {
-    "id": "openai/gpt-5-image",
-    "name": "OpenAI: GPT-5 Image",
-    "description": "[GPT-5](https://openrouter.ai/openai/gpt-5) Image combines OpenAI's most advanced language model with state-of-the-art image generation capabilities.",
-    "context_length": 400000,
-    "hugging_face_id": ""
-  },
-  {
-    "id": "inclusionai/ring-1t",
-    "name": "inclusionAI: Ring 1T",
-    "description": "Ring-1T has undergone continued scaling with large-scale verifiable reward reinforcement learning (RLVR) training, further unlocking the natural language reasoning capabilities of the.",
-    "context_length": 131072,
-    "hugging_face_id": "inclusionAI/Ring-1T"
-  },
-  {
-    "id": "inclusionai/ling-1t",
-    "name": "inclusionAI: Ling-1T",
-    "description": "Ling-1T is a trillion-parameter open-weight large language model developed by inclusionAI and released under the MIT license.",
-    "context_length": 131072,
-    "hugging_face_id": "inclusionAI/Ling-1T"
-  },
-  {
-    "id": "openai/o3-deep-research",
-    "name": "OpenAI: o3 Deep Research",
-    "description": "o3-deep-research is OpenAI's advanced model for deep research, designed to tackle complex, multi-step research tasks.",
-    "context_length": 200000,
-    "hugging_face_id": ""
-  },
-  {
-    "id": "openai/o4-mini-deep-research",
-    "name": "OpenAI: o4 Mini Deep Research",
-    "description": "o4-mini-deep-research is OpenAI's faster, more affordable deep research model—ideal for tackling complex, multi-step research tasks.",
-    "context_length": 200000,
-    "hugging_face_id": ""
-  },
-  {
-    "id": "nvidia/llama-3.3-nemotron-super-49b-v1.5",
-    "name": "NVIDIA: Llama 3.3 Nemotron Super 49B V1.5",
-    "description": "Llama-3.3-Nemotron-Super-49B-v1.5 is a 49B-parameter, English-centric reasoning/chat model derived from Meta’s Llama-3.3-70B-Instruct with a 128K context.",
-    "context_length": 131072,
-    "hugging_face_id": "nvidia/Llama-3_3-Nemotron-Super-49B-v1_5"
-  },
-  {
-    "id": "baidu/ernie-4.5-21b-a3b-thinking",
-    "name": "Baidu: ERNIE 4.5 21B A3B Thinking",
-    "description": "ERNIE-4.5-21B-A3B-Thinking is Baidu's upgraded lightweight MoE model, refined to boost reasoning depth and quality for top-tier performance in logical puzzles, math, science, coding, text generation,.",
-    "context_length": 131072,
-    "hugging_face_id": "baidu/ERNIE-4.5-21B-A3B-Thinking"
-  },
-  {
-    "id": "google/gemini-2.5-flash-image",
-    "name": "Google: Gemini 2.5 Flash Image (Nano Banana)",
-    "description": "Gemini 2.5 Flash Image, a.k.a.",
-    "context_length": 32768,
-    "hugging_face_id": ""
-  },
-  {
-    "id": "qwen/qwen3-vl-30b-a3b-thinking",
-    "name": "Qwen: Qwen3 VL 30B A3B Thinking",
-    "description": "Qwen3-VL-30B-A3B-Thinking is a multimodal model that unifies strong text generation with visual understanding for images and videos.",
-    "context_length": 131072,
-    "hugging_face_id": "Qwen/Qwen3-VL-30B-A3B-Thinking"
-  },
-  {
-    "id": "qwen/qwen3-vl-30b-a3b-instruct",
-    "name": "Qwen: Qwen3 VL 30B A3B Instruct",
-    "description": "Qwen3-VL-30B-A3B-Instruct is a multimodal model that unifies strong text generation with visual understanding for images and videos.",
-    "context_length": 262144,
-    "hugging_face_id": "Qwen/Qwen3-VL-30B-A3B-Instruct"
-  },
-  {
-    "id": "openai/gpt-5-pro",
-    "name": "OpenAI: GPT-5 Pro",
-    "description": "GPT-5 Pro is OpenAI’s most advanced model, offering major improvements in reasoning, code quality, and user experience.",
-    "context_length": 400000,
-    "hugging_face_id": ""
-  },
-  {
-    "id": "z-ai/glm-4.6",
-    "name": "Z.AI: GLM 4.6",
-    "description": "Compared with GLM-4.5, this generation brings several key improvements:\n\nLonger context window: The context window has been expanded from 128K to 200K tokens, enabling the model to handle more complex.",
-    "context_length": 202752,
-    "hugging_face_id": ""
-  },
-  {
-    "id": "z-ai/glm-4.6:exacto",
-    "name": "Z.AI: GLM 4.6 (exacto)",
-    "description": "Compared with GLM-4.5, this generation brings several key improvements:\n\nLonger context window: The context window has been expanded from 128K to 200K tokens, enabling the model to handle more complex.",
-    "context_length": 202752,
-    "hugging_face_id": ""
-  },
-  {
-    "id": "anthropic/claude-sonnet-4.5",
-    "name": "Anthropic: Claude Sonnet 4.5",
-    "description": "Claude Sonnet 4.5 is Anthropic’s most advanced Sonnet model to date, optimized for real-world agents and coding workflows.",
-    "context_length": 1000000,
-    "hugging_face_id": ""
-  },
-  {
-    "id": "deepseek/deepseek-v3.2-exp",
-    "name": "DeepSeek: DeepSeek V3.2 Exp",
-    "description": "DeepSeek-V3.2-Exp is an experimental large language model released by DeepSeek as an intermediate step between V3.1 and future architectures.",
-    "context_length": 163840,
-    "hugging_face_id": "deepseek-ai/DeepSeek-V3.2-Exp"
-  },
-  {
-    "id": "thedrummer/cydonia-24b-v4.1",
-    "name": "TheDrummer: Cydonia 24B V4.1",
-    "description": "Uncensored and creative writing model based on Mistral Small 3.2 24B with good recall, prompt adherence, and intelligence.",
-    "context_length": 131072,
-    "hugging_face_id": "thedrummer/cydonia-24b-v4.1"
-  },
-  {
-    "id": "relace/relace-apply-3",
-    "name": "Relace: Relace Apply 3",
-    "description": "Relace Apply 3 is a specialized code-patching LLM that merges AI-suggested edits straight into your source files.",
-    "context_length": 256000,
-    "hugging_face_id": ""
-  },
-  {
-    "id": "google/gemini-2.5-flash-preview-09-2025",
-    "name": "Google: Gemini 2.5 Flash Preview 09-2025",
-    "description": "Gemini 2.5 Flash Preview September 2025 Checkpoint is Google's state-of-the-art workhorse model, specifically designed for advanced reasoning, coding, mathematics, and scientific tasks.",
-    "context_length": 1048576,
-    "hugging_face_id": ""
-  },
-  {
-    "id": "google/gemini-2.5-flash-lite-preview-09-2025",
-    "name": "Google: Gemini 2.5 Flash Lite Preview 09-2025",
-    "description": "Gemini 2.5 Flash-Lite is a lightweight reasoning model in the Gemini 2.5 family, optimized for ultra-low latency and cost efficiency.",
-    "context_length": 1048576,
-    "hugging_face_id": ""
-  },
-  {
-    "id": "qwen/qwen3-vl-235b-a22b-thinking",
-    "name": "Qwen: Qwen3 VL 235B A22B Thinking",
-    "description": "Qwen3-VL-235B-A22B Thinking is a multimodal model that unifies strong text generation with visual understanding across images and video.",
-    "context_length": 262144,
-    "hugging_face_id": "Qwen/Qwen3-VL-235B-A22B-Thinking"
-  },
-  {
-    "id": "qwen/qwen3-vl-235b-a22b-instruct",
-    "name": "Qwen: Qwen3 VL 235B A22B Instruct",
-    "description": "Qwen3-VL-235B-A22B Instruct is an open-weight multimodal model that unifies strong text generation with visual understanding across images and video.",
-    "context_length": 262144,
-    "hugging_face_id": "Qwen/Qwen3-VL-235B-A22B-Instruct"
-  },
-  {
-    "id": "qwen/qwen3-max",
-    "name": "Qwen: Qwen3 Max",
-    "description": "Qwen3-Max is an updated release built on the Qwen3 series, offering major improvements in reasoning, instruction following, multilingual support, and long-tail knowledge coverage compared to the.",
-    "context_length": 256000,
-    "hugging_face_id": ""
-  },
-  {
-    "id": "qwen/qwen3-coder-plus",
-    "name": "Qwen: Qwen3 Coder Plus",
-    "description": "Qwen3 Coder Plus is Alibaba's proprietary version of the Open Source Qwen3 Coder 480B A35B.",
-    "context_length": 128000,
-    "hugging_face_id": ""
-  },
-  {
-    "id": "openai/gpt-5-codex",
-    "name": "OpenAI: GPT-5 Codex",
-    "description": "GPT-5-Codex is a specialized version of GPT-5 optimized for software engineering and coding workflows.",
-    "context_length": 400000,
-    "hugging_face_id": ""
-  },
-  {
-    "id": "deepseek/deepseek-v3.1-terminus",
-    "name": "DeepSeek: DeepSeek V3.1 Terminus",
-    "description": "DeepSeek-V3.1 Terminus is an update to [DeepSeek V3.1](/deepseek/deepseek-chat-v3.1) that maintains the model's original capabilities while addressing issues reported by users, including language.",
-    "context_length": 163840,
-    "hugging_face_id": "deepseek-ai/DeepSeek-V3.1-Terminus"
-  },
-  {
-    "id": "deepseek/deepseek-v3.1-terminus:exacto",
-    "name": "DeepSeek: DeepSeek V3.1 Terminus (exacto)",
-    "description": "DeepSeek-V3.1 Terminus is an update to [DeepSeek V3.1](/deepseek/deepseek-chat-v3.1) that maintains the model's original capabilities while addressing issues reported by users, including language.",
-    "context_length": 131072,
-    "hugging_face_id": "deepseek-ai/DeepSeek-V3.1-Terminus"
-  },
-  {
-    "id": "x-ai/grok-4-fast",
-    "name": "xAI: Grok 4 Fast",
-    "description": "Grok 4 Fast is xAI's latest multimodal model with SOTA cost-efficiency and a 2M token context window.",
-    "context_length": 2000000,
-    "hugging_face_id": ""
-  },
-  {
-    "id": "alibaba/tongyi-deepresearch-30b-a3b:free",
-    "name": "Tongyi DeepResearch 30B A3B (free)",
-    "description": "Tongyi DeepResearch is an agentic large language model developed by Tongyi Lab, with 30 billion total parameters activating only 3 billion per token.",
-    "context_length": 131072,
-    "hugging_face_id": "Alibaba-NLP/Tongyi-DeepResearch-30B-A3B"
-  },
-  {
-    "id": "alibaba/tongyi-deepresearch-30b-a3b",
-    "name": "Tongyi DeepResearch 30B A3B",
-    "description": "Tongyi DeepResearch is an agentic large language model developed by Tongyi Lab, with 30 billion total parameters activating only 3 billion per token.",
-    "context_length": 131072,
-    "hugging_face_id": "Alibaba-NLP/Tongyi-DeepResearch-30B-A3B"
-  },
-  {
-    "id": "qwen/qwen3-coder-flash",
-    "name": "Qwen: Qwen3 Coder Flash",
-    "description": "Qwen3 Coder Flash is Alibaba's fast and cost efficient version of their proprietary Qwen3 Coder Plus.",
-    "context_length": 128000,
-    "hugging_face_id": ""
-  },
-  {
-    "id": "arcee-ai/afm-4.5b",
-    "name": "Arcee AI: AFM 4.5B",
-    "description": "AFM-4.5B is a 4.5 billion parameter instruction-tuned language model developed by Arcee AI.",
-    "context_length": 65536,
-    "hugging_face_id": "arcee-ai/AFM-4.5B"
-  },
-  {
-    "id": "opengvlab/internvl3-78b",
-    "name": "OpenGVLab: InternVL3 78B",
-    "description": "The InternVL3 series is an advanced multimodal large language model (MLLM).",
-    "context_length": 32768,
-    "hugging_face_id": "OpenGVLab/InternVL3-78B"
-  },
-  {
-    "id": "qwen/qwen3-next-80b-a3b-thinking",
-    "name": "Qwen: Qwen3 Next 80B A3B Thinking",
-    "description": "Qwen3-Next-80B-A3B-Thinking is a reasoning-first chat model in the Qwen3-Next line that outputs structured “thinking” traces by default.",
-    "context_length": 262144,
-    "hugging_face_id": "Qwen/Qwen3-Next-80B-A3B-Thinking"
-  },
-  {
-    "id": "qwen/qwen3-next-80b-a3b-instruct",
-    "name": "Qwen: Qwen3 Next 80B A3B Instruct",
-    "description": "Qwen3-Next-80B-A3B-Instruct is an instruction-tuned chat model in the Qwen3-Next series optimized for fast, stable responses without “thinking” traces.",
-    "context_length": 262144,
-    "hugging_face_id": "Qwen/Qwen3-Next-80B-A3B-Instruct"
-  },
-  {
-    "id": "meituan/longcat-flash-chat:free",
-    "name": "Meituan: LongCat Flash Chat (free)",
-    "description": "LongCat-Flash-Chat is a large-scale Mixture-of-Experts (MoE) model with 560B total parameters, of which 18.6B–31.3B (≈27B on average) are dynamically activated per input.",
-    "context_length": 131072,
-    "hugging_face_id": "meituan-longcat/LongCat-Flash-Chat"
-  },
-  {
-    "id": "meituan/longcat-flash-chat",
-    "name": "Meituan: LongCat Flash Chat",
-    "description": "LongCat-Flash-Chat is a large-scale Mixture-of-Experts (MoE) model with 560B total parameters, of which 18.6B–31.3B (≈27B on average) are dynamically activated per input.",
-    "context_length": 131072,
-    "hugging_face_id": "meituan-longcat/LongCat-Flash-Chat"
-  },
-  {
-    "id": "qwen/qwen-plus-2025-07-28",
-    "name": "Qwen: Qwen Plus 0728",
-    "description": "Qwen Plus 0728, based on the Qwen3 foundation model, is a 1 million context hybrid reasoning model with a balanced performance, speed, and cost combination.",
-    "context_length": 1000000,
-    "hugging_face_id": ""
-  },
-  {
-    "id": "qwen/qwen-plus-2025-07-28:thinking",
-    "name": "Qwen: Qwen Plus 0728 (thinking)",
-    "description": "Qwen Plus 0728, based on the Qwen3 foundation model, is a 1 million context hybrid reasoning model with a balanced performance, speed, and cost combination.",
-    "context_length": 1000000,
-    "hugging_face_id": ""
-  },
-  {
-    "id": "nvidia/nemotron-nano-9b-v2:free",
-    "name": "NVIDIA: Nemotron Nano 9B V2 (free)",
-    "description": "NVIDIA-Nemotron-Nano-9B-v2 is a large language model (LLM) trained from scratch by NVIDIA, and designed as a unified model for both reasoning and non-reasoning tasks.",
-    "context_length": 128000,
-    "hugging_face_id": "nvidia/NVIDIA-Nemotron-Nano-9B-v2"
-  },
-  {
-    "id": "nvidia/nemotron-nano-9b-v2",
-    "name": "NVIDIA: Nemotron Nano 9B V2",
-    "description": "NVIDIA-Nemotron-Nano-9B-v2 is a large language model (LLM) trained from scratch by NVIDIA, and designed as a unified model for both reasoning and non-reasoning tasks.",
-    "context_length": 131072,
-    "hugging_face_id": "nvidia/NVIDIA-Nemotron-Nano-9B-v2"
-  },
-  {
-    "id": "moonshotai/kimi-k2-0905",
-    "name": "MoonshotAI: Kimi K2 0905",
-    "description": "Kimi K2 0905 is the September update of [Kimi K2 0711](moonshotai/kimi-k2).",
-    "context_length": 262144,
-    "hugging_face_id": "moonshotai/Kimi-K2-Instruct-0905"
-  },
-  {
-    "id": "moonshotai/kimi-k2-0905:exacto",
-    "name": "MoonshotAI: Kimi K2 0905 (exacto)",
-    "description": "Kimi K2 0905 is the September update of [Kimi K2 0711](moonshotai/kimi-k2).",
-    "context_length": 262144,
-    "hugging_face_id": "moonshotai/Kimi-K2-Instruct-0905"
-  },
-  {
-    "id": "deepcogito/cogito-v2-preview-llama-70b",
-    "name": "Deep Cogito: Cogito V2 Preview Llama 70B",
-    "description": "Cogito v2 70B is a dense hybrid reasoning model that combines direct answering capabilities with advanced self-reflection.",
-    "context_length": 32768,
-    "hugging_face_id": "deepcogito/cogito-v2-preview-llama-70B"
-  },
-  {
-    "id": "deepcogito/cogito-v2-preview-llama-109b-moe",
-    "name": "Cogito V2 Preview Llama 109B",
-    "description": "An instruction-tuned, hybrid-reasoning Mixture-of-Experts model built on Llama-4-Scout-17B-16E.",
-    "context_length": 32767,
-    "hugging_face_id": "deepcogito/cogito-v2-preview-llama-109B-MoE"
-  },
-  {
-    "id": "deepcogito/cogito-v2-preview-deepseek-671b",
-    "name": "Deep Cogito: Cogito V2 Preview Deepseek 671B",
-    "description": "Cogito v2 is a multilingual, instruction-tuned Mixture of Experts (MoE) large language model with 671 billion parameters.",
-    "context_length": 163840,
-    "hugging_face_id": "deepcogito/cogito-v2-preview-deepseek-671B-MoE"
-  },
-  {
-    "id": "stepfun-ai/step3",
-    "name": "StepFun: Step3",
-    "description": "Step3 is a cutting-edge multimodal reasoning model—built on a Mixture-of-Experts architecture with 321B total parameters and 38B active.",
-    "context_length": 65536,
-    "hugging_face_id": "stepfun-ai/step3"
-  },
-  {
-    "id": "qwen/qwen3-30b-a3b-thinking-2507",
-    "name": "Qwen: Qwen3 30B A3B Thinking 2507",
-    "description": "Qwen3-30B-A3B-Thinking-2507 is a 30B parameter Mixture-of-Experts reasoning model optimized for complex tasks requiring extended multi-step thinking.",
-    "context_length": 262144,
-    "hugging_face_id": "Qwen/Qwen3-30B-A3B-Thinking-2507"
-  },
-  {
-    "id": "x-ai/grok-code-fast-1",
-    "name": "xAI: Grok Code Fast 1",
-    "description": "Grok Code Fast 1 is a speedy and economical reasoning model that excels at agentic coding.",
-    "context_length": 256000,
-    "hugging_face_id": ""
-  },
-  {
-    "id": "nousresearch/hermes-4-70b",
-    "name": "Nous: Hermes 4 70B",
-    "description": "Hermes 4 70B is a hybrid reasoning model from Nous Research, built on Meta-Llama-3.1-70B.",
-    "context_length": 131072,
-    "hugging_face_id": "NousResearch/Hermes-4-70B"
-  },
-  {
-    "id": "nousresearch/hermes-4-405b",
-    "name": "Nous: Hermes 4 405B",
-    "description": "Hermes 4 is a large-scale reasoning model built on Meta-Llama-3.1-405B and released by Nous Research.",
-    "context_length": 131072,
-    "hugging_face_id": "NousResearch/Hermes-4-405B"
-  },
-  {
-    "id": "google/gemini-2.5-flash-image-preview",
-    "name": "Google: Gemini 2.5 Flash Image Preview (Nano Banana)",
-    "description": "Gemini 2.5 Flash Image Preview, a.k.a.",
-    "context_length": 32768,
-    "hugging_face_id": ""
-  },
-  {
-    "id": "deepseek/deepseek-chat-v3.1:free",
-    "name": "DeepSeek: DeepSeek V3.1 (free)",
-    "description": "DeepSeek-V3.1 is a large hybrid reasoning model (671B parameters, 37B active) that supports both thinking and non-thinking modes via prompt templates.",
-    "context_length": 163800,
-    "hugging_face_id": "deepseek-ai/DeepSeek-V3.1"
-  },
-  {
-    "id": "deepseek/deepseek-chat-v3.1",
-    "name": "DeepSeek: DeepSeek V3.1",
-    "description": "DeepSeek-V3.1 is a large hybrid reasoning model (671B parameters, 37B active) that supports both thinking and non-thinking modes via prompt templates.",
-    "context_length": 163840,
-    "hugging_face_id": "deepseek-ai/DeepSeek-V3.1"
-  },
-  {
-    "id": "openai/gpt-4o-audio-preview",
-    "name": "OpenAI: GPT-4o Audio",
-    "description": "The gpt-4o-audio-preview model adds support for audio inputs as prompts.",
-    "context_length": 128000,
-    "hugging_face_id": ""
-  },
-  {
-    "id": "mistralai/mistral-medium-3.1",
-    "name": "Mistral: Mistral Medium 3.1",
-    "description": "Mistral Medium 3.1 is an updated version of Mistral Medium 3, which is a high-performance enterprise-grade language model designed to deliver frontier-level capabilities at significantly reduced.",
-    "context_length": 131072,
-    "hugging_face_id": ""
-  },
-  {
-    "id": "baidu/ernie-4.5-21b-a3b",
-    "name": "Baidu: ERNIE 4.5 21B A3B",
-    "description": "A sophisticated text-based Mixture-of-Experts (MoE) model featuring 21B total parameters with 3B activated per token, delivering exceptional multimodal understanding and generation through.",
-    "context_length": 120000,
-    "hugging_face_id": "baidu/ERNIE-4.5-21B-A3B-PT"
-  },
-  {
-    "id": "baidu/ernie-4.5-vl-28b-a3b",
-    "name": "Baidu: ERNIE 4.5 VL 28B A3B",
-    "description": "A powerful multimodal Mixture-of-Experts chat model featuring 28B total parameters with 3B activated per token, delivering exceptional text and vision understanding through its innovative.",
-    "context_length": 30000,
-    "hugging_face_id": "baidu/ERNIE-4.5-VL-28B-A3B-PT"
-  },
-  {
-    "id": "z-ai/glm-4.5v",
-    "name": "Z.AI: GLM 4.5V",
-    "description": "GLM-4.5V is a vision-language foundation model for multimodal agent applications.",
-    "context_length": 65536,
-    "hugging_face_id": "zai-org/GLM-4.5V"
-  },
-  {
-    "id": "ai21/jamba-mini-1.7",
-    "name": "AI21: Jamba Mini 1.7",
-    "description": "Jamba Mini 1.7 is a compact and efficient member of the Jamba open model family, incorporating key improvements in grounding and instruction-following while maintaining the benefits of the.",
-    "context_length": 256000,
-    "hugging_face_id": "ai21labs/AI21-Jamba-Mini-1.7"
-  },
-  {
-    "id": "ai21/jamba-large-1.7",
-    "name": "AI21: Jamba Large 1.7",
-    "description": "Jamba Large 1.7 is the latest model in the Jamba open family, offering improvements in grounding, instruction-following, and overall efficiency.",
-    "context_length": 256000,
-    "hugging_face_id": "ai21labs/AI21-Jamba-Large-1.7"
-  },
-  {
-    "id": "openai/gpt-5-chat",
-    "name": "OpenAI: GPT-5 Chat",
-    "description": "GPT-5 Chat is designed for advanced, natural, multimodal, and context-aware conversations for enterprise applications.",
-    "context_length": 128000,
-    "hugging_face_id": ""
-  },
-  {
-    "id": "openai/gpt-5",
-    "name": "OpenAI: GPT-5",
-    "description": "GPT-5 is OpenAI’s most advanced model, offering major improvements in reasoning, code quality, and user experience.",
-    "context_length": 400000,
-    "hugging_face_id": ""
-  },
-  {
-    "id": "openai/gpt-5-mini",
-    "name": "OpenAI: GPT-5 Mini",
-    "description": "GPT-5 Mini is a compact version of GPT-5, designed to handle lighter-weight reasoning tasks.",
-    "context_length": 400000,
-    "hugging_face_id": ""
-  },
-  {
-    "id": "openai/gpt-5-nano",
-    "name": "OpenAI: GPT-5 Nano",
-    "description": "GPT-5-Nano is the smallest and fastest variant in the GPT-5 system, optimized for developer tools, rapid interactions, and ultra-low latency environments.",
-    "context_length": 400000,
-    "hugging_face_id": ""
-  },
-  {
-    "id": "openai/gpt-oss-120b",
-    "name": "OpenAI: gpt-oss-120b",
-    "description": "gpt-oss-120b is an open-weight, 117B-parameter Mixture-of-Experts (MoE) language model from OpenAI designed for high-reasoning, agentic, and general-purpose production use cases.",
-    "context_length": 131072,
-    "hugging_face_id": "openai/gpt-oss-120b"
-  },
-  {
-    "id": "openai/gpt-oss-120b:exacto",
-    "name": "OpenAI: gpt-oss-120b (exacto)",
-    "description": "gpt-oss-120b is an open-weight, 117B-parameter Mixture-of-Experts (MoE) language model from OpenAI designed for high-reasoning, agentic, and general-purpose production use cases.",
-    "context_length": 131072,
-    "hugging_face_id": "openai/gpt-oss-120b"
-  },
-  {
-    "id": "openai/gpt-oss-20b:free",
-    "name": "OpenAI: gpt-oss-20b (free)",
-    "description": "gpt-oss-20b is an open-weight 21B parameter model released by OpenAI under the Apache 2.0 license.",
-    "context_length": 131072,
-    "hugging_face_id": "openai/gpt-oss-20b"
-  },
-  {
-    "id": "openai/gpt-oss-20b",
-    "name": "OpenAI: gpt-oss-20b",
-    "description": "gpt-oss-20b is an open-weight 21B parameter model released by OpenAI under the Apache 2.0 license.",
-    "context_length": 131072,
-    "hugging_face_id": "openai/gpt-oss-20b"
-  },
-  {
-    "id": "anthropic/claude-opus-4.1",
-    "name": "Anthropic: Claude Opus 4.1",
-    "description": "Claude Opus 4.1 is an updated version of Anthropic’s flagship model, offering improved performance in coding, reasoning, and agentic tasks.",
-    "context_length": 200000,
-    "hugging_face_id": ""
-  },
-  {
-    "id": "mistralai/codestral-2508",
-    "name": "Mistral: Codestral 2508",
-    "description": "Mistral's cutting-edge language model for coding released end of July 2025.",
-    "context_length": 256000,
-    "hugging_face_id": ""
-  },
-  {
-    "id": "qwen/qwen3-coder-30b-a3b-instruct",
-    "name": "Qwen: Qwen3 Coder 30B A3B Instruct",
-    "description": "Qwen3-Coder-30B-A3B-Instruct is a 30.5B parameter Mixture-of-Experts (MoE) model with 128 experts (8 active per forward pass), designed for advanced code generation, repository-scale understanding,.",
-    "context_length": 262144,
-    "hugging_face_id": "Qwen/Qwen3-Coder-30B-A3B-Instruct"
-  },
-  {
-    "id": "qwen/qwen3-30b-a3b-instruct-2507",
-    "name": "Qwen: Qwen3 30B A3B Instruct 2507",
-    "description": "Qwen3-30B-A3B-Instruct-2507 is a 30.5B-parameter mixture-of-experts language model from Qwen, with 3.3B active parameters per inference.",
-    "context_length": 262144,
-    "hugging_face_id": "Qwen/Qwen3-30B-A3B-Instruct-2507"
-  },
-  {
-    "id": "z-ai/glm-4.5",
-    "name": "Z.AI: GLM 4.5",
-    "description": "GLM-4.5 is our latest flagship foundation model, purpose-built for agent-based applications.",
-    "context_length": 131072,
-    "hugging_face_id": "zai-org/GLM-4.5"
-  },
-  {
-    "id": "z-ai/glm-4.5-air:free",
-    "name": "Z.AI: GLM 4.5 Air (free)",
-    "description": "GLM-4.5-Air is the lightweight variant of our latest flagship model family, also purpose-built for agent-centric applications.",
-    "context_length": 131072,
-    "hugging_face_id": "zai-org/GLM-4.5-Air"
-  },
-  {
-    "id": "z-ai/glm-4.5-air",
-    "name": "Z.AI: GLM 4.5 Air",
-    "description": "GLM-4.5-Air is the lightweight variant of our latest flagship model family, also purpose-built for agent-centric applications.",
-    "context_length": 131072,
-    "hugging_face_id": "zai-org/GLM-4.5-Air"
-  },
-  {
-    "id": "qwen/qwen3-235b-a22b-thinking-2507",
-    "name": "Qwen: Qwen3 235B A22B Thinking 2507",
-    "description": "Qwen3-235B-A22B-Thinking-2507 is a high-performance, open-weight Mixture-of-Experts (MoE) language model optimized for complex reasoning tasks.",
-    "context_length": 262144,
-    "hugging_face_id": "Qwen/Qwen3-235B-A22B-Thinking-2507"
-  },
-  {
-    "id": "z-ai/glm-4-32b",
-    "name": "Z.AI: GLM 4 32B ",
-    "description": "GLM 4 32B is a cost-effective foundation language model.",
-    "context_length": 128000,
-    "hugging_face_id": ""
-  },
-  {
-    "id": "qwen/qwen3-coder:free",
-    "name": "Qwen: Qwen3 Coder 480B A35B (free)",
-    "description": "Qwen3-Coder-480B-A35B-Instruct is a Mixture-of-Experts (MoE) code generation model developed by the Qwen team.",
-    "context_length": 262000,
-    "hugging_face_id": "Qwen/Qwen3-Coder-480B-A35B-Instruct"
-  },
-  {
-    "id": "qwen/qwen3-coder",
-    "name": "Qwen: Qwen3 Coder 480B A35B",
-    "description": "Qwen3-Coder-480B-A35B-Instruct is a Mixture-of-Experts (MoE) code generation model developed by the Qwen team.",
-    "context_length": 262144,
-    "hugging_face_id": "Qwen/Qwen3-Coder-480B-A35B-Instruct"
-  },
-  {
-    "id": "qwen/qwen3-coder:exacto",
-    "name": "Qwen: Qwen3 Coder 480B A35B (exacto)",
-    "description": "Qwen3-Coder-480B-A35B-Instruct is a Mixture-of-Experts (MoE) code generation model developed by the Qwen team.",
-    "context_length": 262144,
-    "hugging_face_id": "Qwen/Qwen3-Coder-480B-A35B-Instruct"
-  },
-  {
-    "id": "bytedance/ui-tars-1.5-7b",
-    "name": "ByteDance: UI-TARS 7B ",
-    "description": "UI-TARS-1.5 is a multimodal vision-language agent optimized for GUI-based environments, including desktop interfaces, web browsers, mobile systems, and games.",
-    "context_length": 128000,
-    "hugging_face_id": "ByteDance-Seed/UI-TARS-1.5-7B"
-  },
-  {
-    "id": "google/gemini-2.5-flash-lite",
-    "name": "Google: Gemini 2.5 Flash Lite",
-    "description": "Gemini 2.5 Flash-Lite is a lightweight reasoning model in the Gemini 2.5 family, optimized for ultra-low latency and cost efficiency.",
-    "context_length": 1048576,
-    "hugging_face_id": ""
-  },
-  {
-    "id": "qwen/qwen3-235b-a22b-2507",
-    "name": "Qwen: Qwen3 235B A22B Instruct 2507",
-    "description": "Qwen3-235B-A22B-Instruct-2507 is a multilingual, instruction-tuned mixture-of-experts language model based on the Qwen3-235B architecture, with 22B active parameters per forward pass.",
-    "context_length": 262144,
-    "hugging_face_id": "Qwen/Qwen3-235B-A22B-Instruct-2507"
-  },
-  {
-    "id": "switchpoint/router",
-    "name": "Switchpoint Router",
-    "description": "Switchpoint AI's router instantly analyzes your request and directs it to the optimal AI from an ever-evolving library.",
-    "context_length": 131072,
-    "hugging_face_id": ""
-  },
-  {
-    "id": "moonshotai/kimi-k2:free",
-    "name": "MoonshotAI: Kimi K2 0711 (free)",
-    "description": "Kimi K2 Instruct is a large-scale Mixture-of-Experts (MoE) language model developed by Moonshot AI, featuring 1 trillion total parameters with 32 billion active per forward pass.",
-    "context_length": 32768,
-    "hugging_face_id": "moonshotai/Kimi-K2-Instruct"
-  },
-  {
-    "id": "moonshotai/kimi-k2",
-    "name": "MoonshotAI: Kimi K2 0711",
-    "description": "Kimi K2 Instruct is a large-scale Mixture-of-Experts (MoE) language model developed by Moonshot AI, featuring 1 trillion total parameters with 32 billion active per forward pass.",
-    "context_length": 131072,
-    "hugging_face_id": "moonshotai/Kimi-K2-Instruct"
-  },
-  {
-    "id": "thudm/glm-4.1v-9b-thinking",
-    "name": "THUDM: GLM 4.1V 9B Thinking",
-    "description": "GLM-4.1V-9B-Thinking is a 9B parameter vision-language model developed by THUDM, based on the GLM-4-9B foundation.",
-    "context_length": 65536,
-    "hugging_face_id": "THUDM/GLM-4.1V-9B-Thinking"
-  },
-  {
-    "id": "mistralai/devstral-medium",
-    "name": "Mistral: Devstral Medium",
-    "description": "Devstral Medium is a high-performance code generation and agentic reasoning model developed jointly by Mistral AI and All Hands AI.",
-    "context_length": 131072,
-    "hugging_face_id": ""
-  },
-  {
-    "id": "mistralai/devstral-small",
-    "name": "Mistral: Devstral Small 1.1",
-    "description": "Devstral Small 1.1 is a 24B parameter open-weight language model for software engineering agents, developed by Mistral AI in collaboration with All Hands AI.",
-    "context_length": 128000,
-    "hugging_face_id": "mistralai/Devstral-Small-2507"
-  },
-  {
-    "id": "cognitivecomputations/dolphin-mistral-24b-venice-edition:free",
-    "name": "Venice: Uncensored (free)",
-    "description": "Venice Uncensored Dolphin Mistral 24B Venice Edition is a fine-tuned variant of Mistral-Small-24B-Instruct-2501, developed by dphn.ai in collaboration with Venice.ai.",
-    "context_length": 32768,
-    "hugging_face_id": "cognitivecomputations/Dolphin-Mistral-24B-Venice-Edition"
-  },
-  {
-    "id": "x-ai/grok-4",
-    "name": "xAI: Grok 4",
-    "description": "Grok 4 is xAI's latest reasoning model with a 256k context window.",
-    "context_length": 256000,
-    "hugging_face_id": ""
-  },
-  {
-    "id": "google/gemma-3n-e2b-it:free",
-    "name": "Google: Gemma 3n 2B (free)",
-    "description": "Gemma 3n E2B IT is a multimodal, instruction-tuned model developed by Google DeepMind, designed to operate efficiently at an effective parameter size of 2B while leveraging a 6B architecture.",
-    "context_length": 8192,
-    "hugging_face_id": "google/gemma-3n-E2B-it"
-  },
-  {
-    "id": "tencent/hunyuan-a13b-instruct",
-    "name": "Tencent: Hunyuan A13B Instruct",
-    "description": "Hunyuan-A13B is a 13B active parameter Mixture-of-Experts (MoE) language model developed by Tencent, with a total parameter count of 80B and support for reasoning via Chain-of-Thought.",
-    "context_length": 131072,
-    "hugging_face_id": "tencent/Hunyuan-A13B-Instruct"
-  },
-  {
-    "id": "tngtech/deepseek-r1t2-chimera:free",
-    "name": "TNG: DeepSeek R1T2 Chimera (free)",
-    "description": "DeepSeek-TNG-R1T2-Chimera is the second-generation Chimera model from TNG Tech.",
-    "context_length": 163840,
-    "hugging_face_id": "tngtech/DeepSeek-TNG-R1T2-Chimera"
-  },
-  {
-    "id": "tngtech/deepseek-r1t2-chimera",
-    "name": "TNG: DeepSeek R1T2 Chimera",
-    "description": "DeepSeek-TNG-R1T2-Chimera is the second-generation Chimera model from TNG Tech.",
-    "context_length": 163840,
-    "hugging_face_id": "tngtech/DeepSeek-TNG-R1T2-Chimera"
-  },
-  {
-    "id": "morph/morph-v3-large",
-    "name": "Morph: Morph V3 Large",
-    "description": "Morph's high-accuracy apply model for complex code edits.",
-    "context_length": 262144,
-    "hugging_face_id": ""
-  },
-  {
-    "id": "morph/morph-v3-fast",
-    "name": "Morph: Morph V3 Fast",
-    "description": "Morph's fastest apply model for code edits.",
-    "context_length": 81920,
-    "hugging_face_id": ""
-  },
-  {
-    "id": "baidu/ernie-4.5-vl-424b-a47b",
-    "name": "Baidu: ERNIE 4.5 VL 424B A47B ",
-    "description": "ERNIE-4.5-VL-424B-A47B is a multimodal Mixture-of-Experts (MoE) model from Baidu’s ERNIE 4.5 series, featuring 424B total parameters with 47B active per token.",
-    "context_length": 123000,
-    "hugging_face_id": "baidu/ERNIE-4.5-VL-424B-A47B-PT"
-  },
-  {
-    "id": "baidu/ernie-4.5-300b-a47b",
-    "name": "Baidu: ERNIE 4.5 300B A47B ",
-    "description": "ERNIE-4.5-300B-A47B is a 300B parameter Mixture-of-Experts (MoE) language model developed by Baidu as part of the ERNIE 4.5 series.",
-    "context_length": 123000,
-    "hugging_face_id": "baidu/ERNIE-4.5-300B-A47B-PT"
-  },
-  {
-    "id": "thedrummer/anubis-70b-v1.1",
-    "name": "TheDrummer: Anubis 70B V1.1",
-    "description": "TheDrummer's Anubis v1.1 is an unaligned, creative Llama 3.3 70B model focused on providing character-driven roleplay & stories.",
-    "context_length": 131072,
-    "hugging_face_id": "TheDrummer/Anubis-70B-v1.1"
-  },
-  {
-    "id": "inception/mercury",
-    "name": "Inception: Mercury",
-    "description": "Mercury is the first diffusion large language model (dLLM).",
-    "context_length": 128000,
-    "hugging_face_id": ""
-  },
-  {
-    "id": "mistralai/mistral-small-3.2-24b-instruct:free",
-    "name": "Mistral: Mistral Small 3.2 24B (free)",
-    "description": "Mistral-Small-3.2-24B-Instruct-2506 is an updated 24B parameter model from Mistral optimized for instruction following, repetition reduction, and improved function calling.",
-    "context_length": 131072,
-    "hugging_face_id": "mistralai/Mistral-Small-3.2-24B-Instruct-2506"
-  },
-  {
-    "id": "mistralai/mistral-small-3.2-24b-instruct",
-    "name": "Mistral: Mistral Small 3.2 24B",
-    "description": "Mistral-Small-3.2-24B-Instruct-2506 is an updated 24B parameter model from Mistral optimized for instruction following, repetition reduction, and improved function calling.",
-    "context_length": 131072,
-    "hugging_face_id": "mistralai/Mistral-Small-3.2-24B-Instruct-2506"
-  },
-  {
-    "id": "minimax/minimax-m1",
-    "name": "MiniMax: MiniMax M1",
-    "description": "MiniMax-M1 is a large-scale, open-weight reasoning model designed for extended context and high-efficiency inference.",
-    "context_length": 1000000,
-    "hugging_face_id": ""
-  },
-  {
-    "id": "google/gemini-2.5-flash-lite-preview-06-17",
-    "name": "Google: Gemini 2.5 Flash Lite Preview 06-17",
-    "description": "Gemini 2.5 Flash-Lite is a lightweight reasoning model in the Gemini 2.5 family, optimized for ultra-low latency and cost efficiency.",
-    "context_length": 1048576,
-    "hugging_face_id": ""
-  },
-  {
-    "id": "google/gemini-2.5-flash",
-    "name": "Google: Gemini 2.5 Flash",
-    "description": "Gemini 2.5 Flash is Google's state-of-the-art workhorse model, specifically designed for advanced reasoning, coding, mathematics, and scientific tasks.",
-    "context_length": 1048576,
-    "hugging_face_id": ""
-  },
-  {
-    "id": "google/gemini-2.5-pro",
-    "name": "Google: Gemini 2.5 Pro",
-    "description": "Gemini 2.5 Pro is Google’s state-of-the-art AI model designed for advanced reasoning, coding, mathematics, and scientific tasks.",
-    "context_length": 1048576,
-    "hugging_face_id": ""
-  },
-  {
-    "id": "moonshotai/kimi-dev-72b",
-    "name": "MoonshotAI: Kimi Dev 72B",
-    "description": "Kimi-Dev-72B is an open-source large language model fine-tuned for software engineering and issue resolution tasks.",
-    "context_length": 131072,
-    "hugging_face_id": "moonshotai/Kimi-Dev-72B"
-  },
-  {
-    "id": "openai/o3-pro",
-    "name": "OpenAI: o3 Pro",
-    "description": "The o-series of models are trained with reinforcement learning to think before they answer and perform complex reasoning.",
-    "context_length": 200000,
-    "hugging_face_id": ""
-  },
-  {
-    "id": "x-ai/grok-3-mini",
-    "name": "xAI: Grok 3 Mini",
-    "description": "A lightweight model that thinks before responding.",
-    "context_length": 131072,
-    "hugging_face_id": ""
-  },
-  {
-    "id": "x-ai/grok-3",
-    "name": "xAI: Grok 3",
-    "description": "Grok 3 is the latest model from xAI.",
-    "context_length": 131072,
-    "hugging_face_id": ""
-  },
-  {
-    "id": "mistralai/magistral-small-2506",
-    "name": "Mistral: Magistral Small 2506",
-    "description": "Magistral Small is a 24B parameter instruction-tuned model based on Mistral-Small-3.1 (2503), enhanced through supervised fine-tuning on traces from Magistral Medium and further refined via.",
-    "context_length": 40000,
-    "hugging_face_id": "mistralai/Magistral-Small-2506"
-  },
-  {
-    "id": "mistralai/magistral-medium-2506:thinking",
-    "name": "Mistral: Magistral Medium 2506 (thinking)",
-    "description": "Magistral is Mistral's first reasoning model.",
-    "context_length": 40960,
-    "hugging_face_id": ""
-  },
-  {
-    "id": "mistralai/magistral-medium-2506",
-    "name": "Mistral: Magistral Medium 2506",
-    "description": "Magistral is Mistral's first reasoning model.",
-    "context_length": 40960,
-    "hugging_face_id": ""
-  },
-  {
-    "id": "google/gemini-2.5-pro-preview",
-    "name": "Google: Gemini 2.5 Pro Preview 06-05",
-    "description": "Gemini 2.5 Pro is Google’s state-of-the-art AI model designed for advanced reasoning, coding, mathematics, and scientific tasks.",
-    "context_length": 1048576,
-    "hugging_face_id": ""
-  },
-  {
-    "id": "deepseek/deepseek-r1-0528-qwen3-8b:free",
-    "name": "DeepSeek: DeepSeek R1 0528 Qwen3 8B (free)",
-    "description": "DeepSeek-R1-0528 is a lightly upgraded release of DeepSeek R1 that taps more compute and smarter post-training tricks, pushing its reasoning and inference to the brink of flagship models like O3 and.",
-    "context_length": 131072,
-    "hugging_face_id": "deepseek-ai/deepseek-r1-0528-qwen3-8b"
-  },
-  {
-    "id": "deepseek/deepseek-r1-0528-qwen3-8b",
-    "name": "DeepSeek: DeepSeek R1 0528 Qwen3 8B",
-    "description": "DeepSeek-R1-0528 is a lightly upgraded release of DeepSeek R1 that taps more compute and smarter post-training tricks, pushing its reasoning and inference to the brink of flagship models like O3 and.",
-    "context_length": 32768,
-    "hugging_face_id": "deepseek-ai/deepseek-r1-0528-qwen3-8b"
-  },
-  {
-    "id": "deepseek/deepseek-r1-0528:free",
-    "name": "DeepSeek: R1 0528 (free)",
-    "description": "May 28th update to the [original DeepSeek R1](/deepseek/deepseek-r1) Performance on par with [OpenAI o1](/openai/o1), but open-sourced and with fully open reasoning tokens.",
-    "context_length": 163840,
-    "hugging_face_id": "deepseek-ai/DeepSeek-R1-0528"
-  },
-  {
-    "id": "deepseek/deepseek-r1-0528",
-    "name": "DeepSeek: R1 0528",
-    "description": "May 28th update to the [original DeepSeek R1](/deepseek/deepseek-r1) Performance on par with [OpenAI o1](/openai/o1), but open-sourced and with fully open reasoning tokens.",
-    "context_length": 163840,
-    "hugging_face_id": "deepseek-ai/DeepSeek-R1-0528"
-  },
-  {
-    "id": "anthropic/claude-opus-4",
-    "name": "Anthropic: Claude Opus 4",
-    "description": "Claude Opus 4 is benchmarked as the world’s best coding model, at time of release, bringing sustained performance on complex, long-running tasks and agent workflows.",
-    "context_length": 200000,
-    "hugging_face_id": ""
-  },
-  {
-    "id": "anthropic/claude-sonnet-4",
-    "name": "Anthropic: Claude Sonnet 4",
-    "description": "Claude Sonnet 4 significantly enhances the capabilities of its predecessor, Sonnet 3.7, excelling in both coding and reasoning tasks with improved precision and controllability.",
-    "context_length": 1000000,
-    "hugging_face_id": ""
-  },
-  {
-    "id": "mistralai/devstral-small-2505",
-    "name": "Mistral: Devstral Small 2505",
-    "description": "Devstral-Small-2505 is a 24B parameter agentic LLM fine-tuned from Mistral-Small-3.1, jointly developed by Mistral AI and All Hands AI for advanced software engineering tasks.",
-    "context_length": 128000,
-    "hugging_face_id": "mistralai/Devstral-Small-2505"
-  },
-  {
-    "id": "google/gemma-3n-e4b-it:free",
-    "name": "Google: Gemma 3n 4B (free)",
-    "description": "Gemma 3n E4B-it is optimized for efficient execution on mobile and low-resource devices, such as phones, laptops, and tablets.",
-    "context_length": 8192,
-    "hugging_face_id": "google/gemma-3n-E4B-it"
-  },
-  {
-    "id": "google/gemma-3n-e4b-it",
-    "name": "Google: Gemma 3n 4B",
-    "description": "Gemma 3n E4B-it is optimized for efficient execution on mobile and low-resource devices, such as phones, laptops, and tablets.",
-    "context_length": 32768,
-    "hugging_face_id": "google/gemma-3n-E4B-it"
-  },
-  {
-    "id": "openai/codex-mini",
-    "name": "OpenAI: Codex Mini",
-    "description": "codex-mini-latest is a fine-tuned version of o4-mini specifically for use in Codex CLI.",
-    "context_length": 200000,
-    "hugging_face_id": ""
-  },
-  {
-    "id": "meta-llama/llama-3.3-8b-instruct:free",
-    "name": "Meta: Llama 3.3 8B Instruct (free)",
-    "description": "A lightweight and ultra-fast variant of Llama 3.3 70B, for use when quick response times are needed most.",
-    "context_length": 128000,
-    "hugging_face_id": ""
-  },
-  {
-    "id": "nousresearch/deephermes-3-mistral-24b-preview",
-    "name": "Nous: DeepHermes 3 Mistral 24B Preview",
-    "description": "DeepHermes 3 (Mistral 24B Preview) is an instruction-tuned language model by Nous Research based on Mistral-Small-24B, designed for chat, function calling, and advanced multi-turn reasoning.",
-    "context_length": 32768,
-    "hugging_face_id": "NousResearch/DeepHermes-3-Mistral-24B-Preview"
-  },
-  {
-    "id": "mistralai/mistral-medium-3",
-    "name": "Mistral: Mistral Medium 3",
-    "description": "Mistral Medium 3 is a high-performance enterprise-grade language model designed to deliver frontier-level capabilities at significantly reduced operational cost.",
-    "context_length": 131072,
-    "hugging_face_id": ""
-  },
-  {
-    "id": "google/gemini-2.5-pro-preview-05-06",
-    "name": "Google: Gemini 2.5 Pro Preview 05-06",
-    "description": "Gemini 2.5 Pro is Google’s state-of-the-art AI model designed for advanced reasoning, coding, mathematics, and scientific tasks.",
-    "context_length": 1048576,
-    "hugging_face_id": ""
-  },
-  {
-    "id": "arcee-ai/spotlight",
-    "name": "Arcee AI: Spotlight",
-    "description": "Spotlight is a 7‑billion‑parameter vision‑language model derived from Qwen 2.5‑VL and fine‑tuned by Arcee AI for tight image‑text grounding tasks.",
-    "context_length": 131072,
-    "hugging_face_id": ""
-  },
-  {
-    "id": "arcee-ai/maestro-reasoning",
-    "name": "Arcee AI: Maestro Reasoning",
-    "description": "Maestro Reasoning is Arcee's flagship analysis model: a 32 B‑parameter derivative of Qwen 2.5‑32 B tuned with DPO and chain‑of‑thought RL for step‑by‑step logic.",
-    "context_length": 131072,
-    "hugging_face_id": ""
-  },
-  {
-    "id": "arcee-ai/virtuoso-large",
-    "name": "Arcee AI: Virtuoso Large",
-    "description": "Virtuoso‑Large is Arcee's top‑tier general‑purpose LLM at 72 B parameters, tuned to tackle cross‑domain reasoning, creative writing and enterprise QA.",
-    "context_length": 131072,
-    "hugging_face_id": ""
-  },
-  {
-    "id": "arcee-ai/coder-large",
-    "name": "Arcee AI: Coder Large",
-    "description": "Coder‑Large is a 32 B‑parameter offspring of Qwen 2.5‑Instruct that has been further trained on permissively‑licensed GitHub, CodeSearchNet and synthetic bug‑fix corpora.",
-    "context_length": 32768,
-    "hugging_face_id": ""
-  },
-  {
-    "id": "microsoft/phi-4-reasoning-plus",
-    "name": "Microsoft: Phi 4 Reasoning Plus",
-    "description": "Phi-4-reasoning-plus is an enhanced 14B parameter model from Microsoft, fine-tuned from Phi-4 with additional reinforcement learning to boost accuracy on math, science, and code reasoning tasks.",
-    "context_length": 32768,
-    "hugging_face_id": "microsoft/Phi-4-reasoning-plus"
-  },
-  {
-    "id": "inception/mercury-coder",
-    "name": "Inception: Mercury Coder",
-    "description": "Mercury Coder is the first diffusion large language model (dLLM).",
-    "context_length": 128000,
-    "hugging_face_id": ""
-  },
-  {
-    "id": "qwen/qwen3-4b:free",
-    "name": "Qwen: Qwen3 4B (free)",
-    "description": "Qwen3-4B is a 4 billion parameter dense language model from the Qwen3 series, designed to support both general-purpose and reasoning-intensive tasks.",
-    "context_length": 40960,
-    "hugging_face_id": "Qwen/Qwen3-4B"
-  },
-  {
-    "id": "deepseek/deepseek-prover-v2",
-    "name": "DeepSeek: DeepSeek Prover V2",
-    "description": "DeepSeek Prover V2 is a 671B parameter model, speculated to be geared towards logic and mathematics.",
-    "context_length": 163840,
-    "hugging_face_id": "deepseek-ai/DeepSeek-Prover-V2-671B"
-  },
-  {
-    "id": "meta-llama/llama-guard-4-12b",
-    "name": "Meta: Llama Guard 4 12B",
-    "description": "Llama Guard 4 is a Llama 4 Scout-derived multimodal pretrained model, fine-tuned for content safety classification.",
-    "context_length": 163840,
-    "hugging_face_id": "meta-llama/Llama-Guard-4-12B"
-  },
-  {
-    "id": "qwen/qwen3-30b-a3b:free",
-    "name": "Qwen: Qwen3 30B A3B (free)",
-    "description": "Qwen3, the latest generation in the Qwen large language model series, features both dense and mixture-of-experts (MoE) architectures to excel in reasoning, multilingual support, and advanced agent.",
-    "context_length": 40960,
-    "hugging_face_id": "Qwen/Qwen3-30B-A3B"
-  },
-  {
-    "id": "qwen/qwen3-30b-a3b",
-    "name": "Qwen: Qwen3 30B A3B",
-    "description": "Qwen3, the latest generation in the Qwen large language model series, features both dense and mixture-of-experts (MoE) architectures to excel in reasoning, multilingual support, and advanced agent.",
-    "context_length": 40960,
-    "hugging_face_id": "Qwen/Qwen3-30B-A3B"
-  },
-  {
-    "id": "qwen/qwen3-8b",
-    "name": "Qwen: Qwen3 8B",
-    "description": "Qwen3-8B is a dense 8.2B parameter causal language model from the Qwen3 series, designed for both reasoning-heavy tasks and efficient dialogue.",
-    "context_length": 128000,
-    "hugging_face_id": "Qwen/Qwen3-8B"
-  },
-  {
-    "id": "qwen/qwen3-14b:free",
-    "name": "Qwen: Qwen3 14B (free)",
-    "description": "Qwen3-14B is a dense 14.8B parameter causal language model from the Qwen3 series, designed for both complex reasoning and efficient dialogue.",
-    "context_length": 40960,
-    "hugging_face_id": "Qwen/Qwen3-14B"
-  },
-  {
-    "id": "qwen/qwen3-14b",
-    "name": "Qwen: Qwen3 14B",
-    "description": "Qwen3-14B is a dense 14.8B parameter causal language model from the Qwen3 series, designed for both complex reasoning and efficient dialogue.",
-    "context_length": 40960,
-    "hugging_face_id": "Qwen/Qwen3-14B"
-  },
-  {
-    "id": "qwen/qwen3-32b",
-    "name": "Qwen: Qwen3 32B",
-    "description": "Qwen3-32B is a dense 32.8B parameter causal language model from the Qwen3 series, optimized for both complex reasoning and efficient dialogue.",
-    "context_length": 40960,
-    "hugging_face_id": "Qwen/Qwen3-32B"
-  },
-  {
-    "id": "qwen/qwen3-235b-a22b:free",
-    "name": "Qwen: Qwen3 235B A22B (free)",
-    "description": "Qwen3-235B-A22B is a 235B parameter mixture-of-experts (MoE) model developed by Qwen, activating 22B parameters per forward pass.",
-    "context_length": 40960,
-    "hugging_face_id": "Qwen/Qwen3-235B-A22B"
-  },
-  {
-    "id": "qwen/qwen3-235b-a22b",
-    "name": "Qwen: Qwen3 235B A22B",
-    "description": "Qwen3-235B-A22B is a 235B parameter mixture-of-experts (MoE) model developed by Qwen, activating 22B parameters per forward pass.",
-    "context_length": 40960,
-    "hugging_face_id": "Qwen/Qwen3-235B-A22B"
-  },
-  {
-    "id": "tngtech/deepseek-r1t-chimera:free",
-    "name": "TNG: DeepSeek R1T Chimera (free)",
-    "description": "DeepSeek-R1T-Chimera is created by merging DeepSeek-R1 and DeepSeek-V3 (0324), combining the reasoning capabilities of R1 with the token efficiency improvements of V3.",
-    "context_length": 163840,
-    "hugging_face_id": "tngtech/DeepSeek-R1T-Chimera"
-  },
-  {
-    "id": "tngtech/deepseek-r1t-chimera",
-    "name": "TNG: DeepSeek R1T Chimera",
-    "description": "DeepSeek-R1T-Chimera is created by merging DeepSeek-R1 and DeepSeek-V3 (0324), combining the reasoning capabilities of R1 with the token efficiency improvements of V3.",
-    "context_length": 163840,
-    "hugging_face_id": "tngtech/DeepSeek-R1T-Chimera"
-  },
-  {
-    "id": "microsoft/mai-ds-r1:free",
-    "name": "Microsoft: MAI DS R1 (free)",
-    "description": "MAI-DS-R1 is a post-trained variant of DeepSeek-R1 developed by the Microsoft AI team to improve the model’s responsiveness on previously blocked topics while enhancing its safety profile.",
-    "context_length": 163840,
-    "hugging_face_id": "microsoft/MAI-DS-R1"
-  },
-  {
-    "id": "microsoft/mai-ds-r1",
-    "name": "Microsoft: MAI DS R1",
-    "description": "MAI-DS-R1 is a post-trained variant of DeepSeek-R1 developed by the Microsoft AI team to improve the model’s responsiveness on previously blocked topics while enhancing its safety profile.",
-    "context_length": 163840,
-    "hugging_face_id": "microsoft/MAI-DS-R1"
-  },
-  {
-    "id": "openai/o4-mini-high",
-    "name": "OpenAI: o4 Mini High",
-    "description": "OpenAI o4-mini-high is the same model as [o4-mini](/openai/o4-mini) with reasoning_effort set to high.",
-    "context_length": 200000,
-    "hugging_face_id": ""
-  },
-  {
-    "id": "openai/o3",
-    "name": "OpenAI: o3",
-    "description": "o3 is a well-rounded and powerful model across domains.",
-    "context_length": 200000,
-    "hugging_face_id": ""
-  },
-  {
-    "id": "openai/o4-mini",
-    "name": "OpenAI: o4 Mini",
-    "description": "OpenAI o4-mini is a compact reasoning model in the o-series, optimized for fast, cost-efficient performance while retaining strong multimodal and agentic capabilities.",
-    "context_length": 200000,
-    "hugging_face_id": ""
-  },
-  {
-    "id": "qwen/qwen2.5-coder-7b-instruct",
-    "name": "Qwen: Qwen2.5 Coder 7B Instruct",
-    "description": "Qwen2.5-Coder-7B-Instruct is a 7B parameter instruction-tuned language model optimized for code-related tasks such as code generation, reasoning, and bug fixing.",
-    "context_length": 32768,
-    "hugging_face_id": "Qwen/Qwen2.5-Coder-7B-Instruct"
-  },
-  {
-    "id": "openai/gpt-4.1",
-    "name": "OpenAI: GPT-4.1",
-    "description": "GPT-4.1 is a flagship large language model optimized for advanced instruction following, real-world software engineering, and long-context reasoning.",
-    "context_length": 1047576,
-    "hugging_face_id": ""
-  },
-  {
-    "id": "openai/gpt-4.1-mini",
-    "name": "OpenAI: GPT-4.1 Mini",
-    "description": "GPT-4.1 Mini is a mid-sized model delivering performance competitive with GPT-4o at substantially lower latency and cost.",
-    "context_length": 1047576,
-    "hugging_face_id": ""
-  },
-  {
-    "id": "openai/gpt-4.1-nano",
-    "name": "OpenAI: GPT-4.1 Nano",
-    "description": "For tasks that demand low latency, GPT‑4.1 nano is the fastest and cheapest model in the GPT-4.1 series.",
-    "context_length": 1047576,
-    "hugging_face_id": ""
-  },
-  {
-    "id": "eleutherai/llemma_7b",
-    "name": "EleutherAI: Llemma 7b",
-    "description": "Llemma 7B is a language model for mathematics.",
-    "context_length": 4096,
-    "hugging_face_id": "EleutherAI/llemma_7b"
-  },
-  {
-    "id": "alfredpros/codellama-7b-instruct-solidity",
-    "name": "AlfredPros: CodeLLaMa 7B Instruct Solidity",
-    "description": "A finetuned 7 billion parameters Code LLaMA - Instruct model to generate Solidity smart contract using 4-bit QLoRA finetuning provided by PEFT library.",
-    "context_length": 4096,
-    "hugging_face_id": "AlfredPros/CodeLlama-7b-Instruct-Solidity"
-  },
-  {
-    "id": "arliai/qwq-32b-arliai-rpr-v1:free",
-    "name": "ArliAI: QwQ 32B RpR v1 (free)",
-    "description": "QwQ-32B-ArliAI-RpR-v1 is a 32B parameter model fine-tuned from Qwen/QwQ-32B using a curated creative writing and roleplay dataset originally developed for the RPMax series.",
-    "context_length": 32768,
-    "hugging_face_id": "ArliAI/QwQ-32B-ArliAI-RpR-v1"
-  },
-  {
-    "id": "arliai/qwq-32b-arliai-rpr-v1",
-    "name": "ArliAI: QwQ 32B RpR v1",
-    "description": "QwQ-32B-ArliAI-RpR-v1 is a 32B parameter model fine-tuned from Qwen/QwQ-32B using a curated creative writing and roleplay dataset originally developed for the RPMax series.",
-    "context_length": 32768,
-    "hugging_face_id": "ArliAI/QwQ-32B-ArliAI-RpR-v1"
-  },
-  {
-    "id": "agentica-org/deepcoder-14b-preview:free",
-    "name": "Agentica: Deepcoder 14B Preview (free)",
-    "description": "DeepCoder-14B-Preview is a 14B parameter code generation model fine-tuned from DeepSeek-R1-Distill-Qwen-14B using reinforcement learning with GRPO+ and iterative context lengthening.",
-    "context_length": 96000,
-    "hugging_face_id": "agentica-org/DeepCoder-14B-Preview"
-  },
-  {
-    "id": "agentica-org/deepcoder-14b-preview",
-    "name": "Agentica: Deepcoder 14B Preview",
-    "description": "DeepCoder-14B-Preview is a 14B parameter code generation model fine-tuned from DeepSeek-R1-Distill-Qwen-14B using reinforcement learning with GRPO+ and iterative context lengthening.",
-    "context_length": 96000,
-    "hugging_face_id": "agentica-org/DeepCoder-14B-Preview"
-  },
-  {
-    "id": "x-ai/grok-3-mini-beta",
-    "name": "xAI: Grok 3 Mini Beta",
-    "description": "Grok 3 Mini is a lightweight, smaller thinking model.",
-    "context_length": 131072,
-    "hugging_face_id": ""
-  },
-  {
-    "id": "x-ai/grok-3-beta",
-    "name": "xAI: Grok 3 Beta",
-    "description": "Grok 3 is the latest model from xAI.",
-    "context_length": 131072,
-    "hugging_face_id": ""
-  },
-  {
-    "id": "nvidia/llama-3.1-nemotron-ultra-253b-v1",
-    "name": "NVIDIA: Llama 3.1 Nemotron Ultra 253B v1",
-    "description": "Llama-3.1-Nemotron-Ultra-253B-v1 is a large language model (LLM) optimized for advanced reasoning, human-interactive chat, retrieval-augmented generation (RAG), and tool-calling tasks.",
-    "context_length": 131072,
-    "hugging_face_id": "nvidia/Llama-3_1-Nemotron-Ultra-253B-v1"
-  },
-  {
-    "id": "meta-llama/llama-4-maverick:free",
-    "name": "Meta: Llama 4 Maverick (free)",
-    "description": "Llama 4 Maverick 17B Instruct (128E) is a high-capacity multimodal language model from Meta, built on a mixture-of-experts (MoE) architecture with 128 experts and 17 billion active parameters per.",
-    "context_length": 128000,
-    "hugging_face_id": "meta-llama/Llama-4-Maverick-17B-128E-Instruct"
-  },
-  {
-    "id": "meta-llama/llama-4-maverick",
-    "name": "Meta: Llama 4 Maverick",
-    "description": "Llama 4 Maverick 17B Instruct (128E) is a high-capacity multimodal language model from Meta, built on a mixture-of-experts (MoE) architecture with 128 experts and 17 billion active parameters per.",
-    "context_length": 1048576,
-    "hugging_face_id": "meta-llama/Llama-4-Maverick-17B-128E-Instruct"
-  },
-  {
-    "id": "meta-llama/llama-4-scout:free",
-    "name": "Meta: Llama 4 Scout (free)",
-    "description": "Llama 4 Scout 17B Instruct (16E) is a mixture-of-experts (MoE) language model developed by Meta, activating 17 billion parameters out of a total of 109B.",
-    "context_length": 128000,
-    "hugging_face_id": "meta-llama/Llama-4-Scout-17B-16E-Instruct"
-  },
-  {
-    "id": "meta-llama/llama-4-scout",
-    "name": "Meta: Llama 4 Scout",
-    "description": "Llama 4 Scout 17B Instruct (16E) is a mixture-of-experts (MoE) language model developed by Meta, activating 17 billion parameters out of a total of 109B.",
-    "context_length": 327680,
-    "hugging_face_id": "meta-llama/Llama-4-Scout-17B-16E-Instruct"
-  },
-  {
-    "id": "qwen/qwen2.5-vl-32b-instruct:free",
-    "name": "Qwen: Qwen2.5 VL 32B Instruct (free)",
-    "description": "Qwen2.5-VL-32B is a multimodal vision-language model fine-tuned through reinforcement learning for enhanced mathematical reasoning, structured outputs, and visual problem-solving capabilities.",
-    "context_length": 16384,
-    "hugging_face_id": "Qwen/Qwen2.5-VL-32B-Instruct"
-  },
-  {
-    "id": "qwen/qwen2.5-vl-32b-instruct",
-    "name": "Qwen: Qwen2.5 VL 32B Instruct",
-    "description": "Qwen2.5-VL-32B is a multimodal vision-language model fine-tuned through reinforcement learning for enhanced mathematical reasoning, structured outputs, and visual problem-solving capabilities.",
-    "context_length": 16384,
-    "hugging_face_id": "Qwen/Qwen2.5-VL-32B-Instruct"
-  },
-  {
-    "id": "deepseek/deepseek-chat-v3-0324:free",
-    "name": "DeepSeek: DeepSeek V3 0324 (free)",
-    "description": "DeepSeek V3, a 685B-parameter, mixture-of-experts model, is the latest iteration of the flagship chat model family from the DeepSeek team.",
-    "context_length": 163840,
-    "hugging_face_id": "deepseek-ai/DeepSeek-V3-0324"
-  },
-  {
-    "id": "deepseek/deepseek-chat-v3-0324",
-    "name": "DeepSeek: DeepSeek V3 0324",
-    "description": "DeepSeek V3, a 685B-parameter, mixture-of-experts model, is the latest iteration of the flagship chat model family from the DeepSeek team.",
-    "context_length": 163840,
-    "hugging_face_id": "deepseek-ai/DeepSeek-V3-0324"
-  },
-  {
-    "id": "openai/o1-pro",
-    "name": "OpenAI: o1-pro",
-    "description": "The o1 series of models are trained with reinforcement learning to think before they answer and perform complex reasoning.",
-    "context_length": 200000,
-    "hugging_face_id": ""
-  },
-  {
-    "id": "mistralai/mistral-small-3.1-24b-instruct:free",
-    "name": "Mistral: Mistral Small 3.1 24B (free)",
-    "description": "Mistral Small 3.1 24B Instruct is an upgraded variant of Mistral Small 3 (2501), featuring 24 billion parameters with advanced multimodal capabilities.",
-    "context_length": 96000,
-    "hugging_face_id": "mistralai/Mistral-Small-3.1-24B-Instruct-2503"
-  },
-  {
-    "id": "mistralai/mistral-small-3.1-24b-instruct",
-    "name": "Mistral: Mistral Small 3.1 24B",
-    "description": "Mistral Small 3.1 24B Instruct is an upgraded variant of Mistral Small 3 (2501), featuring 24 billion parameters with advanced multimodal capabilities.",
-    "context_length": 131072,
-    "hugging_face_id": "mistralai/Mistral-Small-3.1-24B-Instruct-2503"
-  },
-  {
-    "id": "allenai/olmo-2-0325-32b-instruct",
-    "name": "AllenAI: Olmo 2 32B Instruct",
-    "description": "OLMo-2 32B Instruct is a supervised instruction-finetuned variant of the OLMo-2 32B March 2025 base model.",
-    "context_length": 4096,
-    "hugging_face_id": "allenai/OLMo-2-0325-32B-Instruct"
-  },
-  {
-    "id": "google/gemma-3-4b-it:free",
-    "name": "Google: Gemma 3 4B (free)",
-    "description": "Gemma 3 introduces multimodality, supporting vision-language input and text outputs.",
-    "context_length": 32768,
-    "hugging_face_id": "google/gemma-3-4b-it"
-  },
-  {
-    "id": "google/gemma-3-4b-it",
-    "name": "Google: Gemma 3 4B",
-    "description": "Gemma 3 introduces multimodality, supporting vision-language input and text outputs.",
-    "context_length": 96000,
-    "hugging_face_id": "google/gemma-3-4b-it"
-  },
-  {
-    "id": "google/gemma-3-12b-it:free",
-    "name": "Google: Gemma 3 12B (free)",
-    "description": "Gemma 3 introduces multimodality, supporting vision-language input and text outputs.",
-    "context_length": 32768,
-    "hugging_face_id": "google/gemma-3-12b-it"
-  },
-  {
-    "id": "google/gemma-3-12b-it",
-    "name": "Google: Gemma 3 12B",
-    "description": "Gemma 3 introduces multimodality, supporting vision-language input and text outputs.",
-    "context_length": 131072,
-    "hugging_face_id": "google/gemma-3-12b-it"
-  },
-  {
-    "id": "cohere/command-a",
-    "name": "Cohere: Command A",
-    "description": "Command A is an open-weights 111B parameter model with a 256k context window focused on delivering great performance across agentic, multilingual, and coding use cases.",
-    "context_length": 256000,
-    "hugging_face_id": "CohereForAI/c4ai-command-a-03-2025"
-  },
-  {
-    "id": "openai/gpt-4o-mini-search-preview",
-    "name": "OpenAI: GPT-4o-mini Search Preview",
-    "description": "GPT-4o mini Search Preview is a specialized model for web search in Chat Completions.",
-    "context_length": 128000,
-    "hugging_face_id": ""
-  },
-  {
-    "id": "openai/gpt-4o-search-preview",
-    "name": "OpenAI: GPT-4o Search Preview",
-    "description": "GPT-4o Search Previewis a specialized model for web search in Chat Completions.",
-    "context_length": 128000,
-    "hugging_face_id": ""
-  },
-  {
-    "id": "google/gemma-3-27b-it:free",
-    "name": "Google: Gemma 3 27B (free)",
-    "description": "Gemma 3 introduces multimodality, supporting vision-language input and text outputs.",
-    "context_length": 131072,
-    "hugging_face_id": ""
-  },
-  {
-    "id": "google/gemma-3-27b-it",
-    "name": "Google: Gemma 3 27B",
-    "description": "Gemma 3 introduces multimodality, supporting vision-language input and text outputs.",
-    "context_length": 131072,
-    "hugging_face_id": ""
-  },
-  {
-    "id": "thedrummer/skyfall-36b-v2",
-    "name": "TheDrummer: Skyfall 36B V2",
-    "description": "Skyfall 36B v2 is an enhanced iteration of Mistral Small 2501, specifically fine-tuned for improved creativity, nuanced writing, role-playing, and coherent storytelling.",
-    "context_length": 32768,
-    "hugging_face_id": "TheDrummer/Skyfall-36B-v2"
-  },
-  {
-    "id": "microsoft/phi-4-multimodal-instruct",
-    "name": "Microsoft: Phi 4 Multimodal Instruct",
-    "description": "Phi-4 Multimodal Instruct is a versatile 5.6B parameter foundation model that combines advanced reasoning and instruction-following capabilities across both text and visual inputs, providing accurate.",
-    "context_length": 131072,
-    "hugging_face_id": "microsoft/Phi-4-multimodal-instruct"
-  },
-  {
-    "id": "perplexity/sonar-reasoning-pro",
-    "name": "Perplexity: Sonar Reasoning Pro",
-    "description": "Note: Sonar Pro pricing includes Perplexity search pricing.",
-    "context_length": 128000,
-    "hugging_face_id": ""
-  },
-  {
-    "id": "perplexity/sonar-pro",
-    "name": "Perplexity: Sonar Pro",
-    "description": "Note: Sonar Pro pricing includes Perplexity search pricing.",
-    "context_length": 200000,
-    "hugging_face_id": ""
-  },
-  {
-    "id": "perplexity/sonar-deep-research",
-    "name": "Perplexity: Sonar Deep Research",
-    "description": "Sonar Deep Research is a research-focused model designed for multi-step retrieval, synthesis, and reasoning across complex topics.",
-    "context_length": 128000,
-    "hugging_face_id": ""
-  },
-  {
-    "id": "qwen/qwq-32b",
-    "name": "Qwen: QwQ 32B",
-    "description": "QwQ is the reasoning model of the Qwen series.",
-    "context_length": 32768,
-    "hugging_face_id": "Qwen/QwQ-32B"
-  },
-  {
-    "id": "google/gemini-2.0-flash-lite-001",
-    "name": "Google: Gemini 2.0 Flash Lite",
-    "description": "Gemini 2.0 Flash Lite offers a significantly faster time to first token (TTFT) compared to [Gemini Flash 1.5](/google/gemini-flash-1.5), while maintaining quality on par with larger models like.",
-    "context_length": 1048576,
-    "hugging_face_id": ""
-  },
-  {
-    "id": "anthropic/claude-3.7-sonnet:thinking",
-    "name": "Anthropic: Claude 3.7 Sonnet (thinking)",
-    "description": "Claude 3.7 Sonnet is an advanced large language model with improved reasoning, coding, and problem-solving capabilities.",
-    "context_length": 200000,
-    "hugging_face_id": ""
-  },
-  {
-    "id": "anthropic/claude-3.7-sonnet",
-    "name": "Anthropic: Claude 3.7 Sonnet",
-    "description": "Claude 3.7 Sonnet is an advanced large language model with improved reasoning, coding, and problem-solving capabilities.",
-    "context_length": 200000,
-    "hugging_face_id": ""
-  },
-  {
-    "id": "mistralai/mistral-saba",
-    "name": "Mistral: Saba",
-    "description": "Mistral Saba is a 24B-parameter language model specifically designed for the Middle East and South Asia, delivering accurate and contextually relevant responses while maintaining efficient performance.",
-    "context_length": 32768,
-    "hugging_face_id": ""
-  },
-  {
-    "id": "meta-llama/llama-guard-3-8b",
-    "name": "Llama Guard 3 8B",
-    "description": "Llama Guard 3 is a Llama-3.1-8B pretrained model, fine-tuned for content safety classification.",
-    "context_length": 131072,
-    "hugging_face_id": "meta-llama/Llama-Guard-3-8B"
-  },
-  {
-    "id": "openai/o3-mini-high",
-    "name": "OpenAI: o3 Mini High",
-    "description": "OpenAI o3-mini-high is the same model as [o3-mini](/openai/o3-mini) with reasoning_effort set to high.",
-    "context_length": 200000,
-    "hugging_face_id": ""
-  },
-  {
-    "id": "google/gemini-2.0-flash-001",
-    "name": "Google: Gemini 2.0 Flash",
-    "description": "Gemini Flash 2.0 offers a significantly faster time to first token (TTFT) compared to [Gemini Flash 1.5](/google/gemini-flash-1.5), while maintaining quality on par with larger models like [Gemini Pro.",
-    "context_length": 1048576,
-    "hugging_face_id": ""
-  },
-  {
-    "id": "qwen/qwen-vl-plus",
-    "name": "Qwen: Qwen VL Plus",
-    "description": "Qwen's Enhanced Large Visual Language Model.",
-    "context_length": 7500,
-    "hugging_face_id": ""
-  },
-  {
-    "id": "aion-labs/aion-1.0",
-    "name": "AionLabs: Aion-1.0",
-    "description": "Aion-1.0 is a multi-model system designed for high performance across various tasks, including reasoning and coding.",
-    "context_length": 131072,
-    "hugging_face_id": ""
-  },
-  {
-    "id": "aion-labs/aion-1.0-mini",
-    "name": "AionLabs: Aion-1.0-Mini",
-    "description": "Aion-1.0-Mini 32B parameter model is a distilled version of the DeepSeek-R1 model, designed for strong performance in reasoning domains such as mathematics, coding, and logic.",
-    "context_length": 131072,
-    "hugging_face_id": "FuseAI/FuseO1-DeepSeekR1-QwQ-SkyT1-32B-Preview"
-  },
-  {
-    "id": "aion-labs/aion-rp-llama-3.1-8b",
-    "name": "AionLabs: Aion-RP 1.0 (8B)",
-    "description": "Aion-RP-Llama-3.1-8B ranks the highest in the character evaluation portion of the RPBench-Auto benchmark, a roleplaying-specific variant of Arena-Hard-Auto, where LLMs evaluate each other’s responses.",
-    "context_length": 32768,
-    "hugging_face_id": ""
-  },
-  {
-    "id": "qwen/qwen-vl-max",
-    "name": "Qwen: Qwen VL Max",
-    "description": "Qwen VL Max is a visual understanding model with 7500 tokens context length.",
-    "context_length": 131072,
-    "hugging_face_id": ""
-  },
-  {
-    "id": "qwen/qwen-turbo",
-    "name": "Qwen: Qwen-Turbo",
-    "description": "Qwen-Turbo, based on Qwen2.5, is a 1M context model that provides fast speed and low cost, suitable for simple tasks.",
-    "context_length": 1000000,
-    "hugging_face_id": ""
-  },
-  {
-    "id": "qwen/qwen2.5-vl-72b-instruct",
-    "name": "Qwen: Qwen2.5 VL 72B Instruct",
-    "description": "Qwen2.5-VL is proficient in recognizing common objects such as flowers, birds, fish, and insects.",
-    "context_length": 32768,
-    "hugging_face_id": "Qwen/Qwen2.5-VL-72B-Instruct"
-  },
-  {
-    "id": "qwen/qwen-plus",
-    "name": "Qwen: Qwen-Plus",
-    "description": "Qwen-Plus, based on the Qwen2.5 foundation model, is a 131K context model with a balanced performance, speed, and cost combination.",
-    "context_length": 131072,
-    "hugging_face_id": ""
-  },
-  {
-    "id": "qwen/qwen-max",
-    "name": "Qwen: Qwen-Max ",
-    "description": "Qwen-Max, based on Qwen2.5, provides the best inference performance among [Qwen models](/qwen), especially for complex multi-step tasks.",
-    "context_length": 32768,
-    "hugging_face_id": ""
-  },
-  {
-    "id": "openai/o3-mini",
-    "name": "OpenAI: o3 Mini",
-    "description": "OpenAI o3-mini is a cost-efficient language model optimized for STEM reasoning tasks, particularly excelling in science, mathematics, and coding.",
-    "context_length": 200000,
-    "hugging_face_id": ""
-  },
-  {
-    "id": "mistralai/mistral-small-24b-instruct-2501:free",
-    "name": "Mistral: Mistral Small 3 (free)",
-    "description": "Mistral Small 3 is a 24B-parameter language model optimized for low-latency performance across common AI tasks.",
-    "context_length": 32768,
-    "hugging_face_id": "mistralai/Mistral-Small-24B-Instruct-2501"
-  },
-  {
-    "id": "mistralai/mistral-small-24b-instruct-2501",
-    "name": "Mistral: Mistral Small 3",
-    "description": "Mistral Small 3 is a 24B-parameter language model optimized for low-latency performance across common AI tasks.",
-    "context_length": 32768,
-    "hugging_face_id": "mistralai/Mistral-Small-24B-Instruct-2501"
-  },
-  {
-    "id": "deepseek/deepseek-r1-distill-qwen-32b",
-    "name": "DeepSeek: R1 Distill Qwen 32B",
-    "description": "DeepSeek R1 Distill Qwen 32B is a distilled large language model based on [Qwen 2.5 32B](https://huggingface.co/Qwen/Qwen2.5-32B), using outputs from [DeepSeek R1](/deepseek/deepseek-r1).",
-    "context_length": 131072,
-    "hugging_face_id": "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B"
-  },
-  {
-    "id": "deepseek/deepseek-r1-distill-qwen-14b",
-    "name": "DeepSeek: R1 Distill Qwen 14B",
-    "description": "DeepSeek R1 Distill Qwen 14B is a distilled large language model based on [Qwen 2.5 14B](https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Qwen-14B), using outputs from [DeepSeek.",
-    "context_length": 32768,
-    "hugging_face_id": "deepseek-ai/DeepSeek-R1-Distill-Qwen-14B"
-  },
-  {
-    "id": "perplexity/sonar-reasoning",
-    "name": "Perplexity: Sonar Reasoning",
-    "description": "Sonar Reasoning is a reasoning model provided by Perplexity based on [DeepSeek R1](/deepseek/deepseek-r1).",
-    "context_length": 127000,
-    "hugging_face_id": ""
-  },
-  {
-    "id": "perplexity/sonar",
-    "name": "Perplexity: Sonar",
-    "description": "Sonar is lightweight, affordable, fast, and simple to use — now featuring citations and the ability to customize sources.",
-    "context_length": 127072,
-    "hugging_face_id": ""
- }, - { - "id": "deepseek/deepseek-r1-distill-llama-70b:free", - "name": "DeepSeek: R1 Distill Llama 70B (free)", - "description": "DeepSeek R1 Distill Llama 70B is a distilled large language model based on [Llama-3.3-70B-Instruct](/meta-llama/llama-3.3-70b-instruct), using outputs from [DeepSeek R1](/deepseek/deepseek-r1).", - "context_length": 8192, - "hugging_face_id": "deepseek-ai/DeepSeek-R1-Distill-Llama-70B" - }, - { - "id": "deepseek/deepseek-r1-distill-llama-70b", - "name": "DeepSeek: R1 Distill Llama 70B", - "description": "DeepSeek R1 Distill Llama 70B is a distilled large language model based on [Llama-3.3-70B-Instruct](/meta-llama/llama-3.3-70b-instruct), using outputs from [DeepSeek R1](/deepseek/deepseek-r1).", - "context_length": 131072, - "hugging_face_id": "deepseek-ai/DeepSeek-R1-Distill-Llama-70B" - }, - { - "id": "deepseek/deepseek-r1:free", - "name": "DeepSeek: R1 (free)", - "description": "DeepSeek R1 is here: Performance on par with [OpenAI o1](/openai/o1), but open-sourced and with fully open reasoning tokens.", - "context_length": 163840, - "hugging_face_id": "deepseek-ai/DeepSeek-R1" - }, - { - "id": "deepseek/deepseek-r1", - "name": "DeepSeek: R1", - "description": "DeepSeek R1 is here: Performance on par with [OpenAI o1](/openai/o1), but open-sourced and with fully open reasoning tokens.", - "context_length": 163840, - "hugging_face_id": "deepseek-ai/DeepSeek-R1" - }, - { - "id": "minimax/minimax-01", - "name": "MiniMax: MiniMax-01", - "description": "MiniMax-01 combines MiniMax-Text-01 for text generation and MiniMax-VL-01 for image understanding.", - "context_length": 1000192, - "hugging_face_id": "MiniMaxAI/MiniMax-Text-01" - }, - { - "id": "mistralai/codestral-2501", - "name": "Mistral: Codestral 2501", - "description": "[Mistral](/mistralai)'s cutting-edge language model for coding.", - "context_length": 256000, - "hugging_face_id": "" - }, - { - "id": "microsoft/phi-4", - "name": "Microsoft: Phi 4", - "description": "[Microsoft Research](/microsoft) Phi-4 is designed to perform well in complex reasoning tasks and can operate efficiently in situations with limited memory or where quick responses are needed.", - "context_length": 16384, - "hugging_face_id": "microsoft/phi-4" - }, - { - "id": "sao10k/l3.1-70b-hanami-x1", - "name": "Sao10K: Llama 3.1 70B Hanami x1", - "description": "This is [Sao10K](/sao10k)'s experiment over [Euryale v2.2](/sao10k/l3.1-euryale-70b).", - "context_length": 16000, - "hugging_face_id": "Sao10K/L3.1-70B-Hanami-x1" - }, - { - "id": "deepseek/deepseek-chat", - "name": "DeepSeek: DeepSeek V3", - "description": "DeepSeek-V3 is the latest model from the DeepSeek team, building upon the instruction following and coding abilities of the previous versions.", - "context_length": 163840, - "hugging_face_id": "deepseek-ai/DeepSeek-V3" - }, - { - "id": "sao10k/l3.3-euryale-70b", - "name": "Sao10K: Llama 3.3 Euryale 70B", - "description": "Euryale L3.3 70B is a model focused on creative roleplay from [Sao10k](https://ko-fi.com/sao10k).", - "context_length": 131072, - "hugging_face_id": "Sao10K/L3.3-70B-Euryale-v2.3" - }, - { - "id": "openai/o1", - "name": "OpenAI: o1", - "description": "The latest and strongest model family from OpenAI, o1 is designed to spend more time thinking before responding.", - "context_length": 200000, - "hugging_face_id": "" - }, - { - "id": "cohere/command-r7b-12-2024", - "name": "Cohere: Command R7B (12-2024)", - "description": "Command R7B (12-2024) is a small, fast update of the Command R+ model, delivered 
in December 2024.", - "context_length": 128000, - "hugging_face_id": "" - }, - { - "id": "google/gemini-2.0-flash-exp:free", - "name": "Google: Gemini 2.0 Flash Experimental (free)", - "description": "Gemini Flash 2.0 offers a significantly faster time to first token (TTFT) compared to [Gemini Flash 1.5](/google/gemini-flash-1.5), while maintaining quality on par with larger models like [Gemini Pro.", - "context_length": 1048576, - "hugging_face_id": "" - }, - { - "id": "meta-llama/llama-3.3-70b-instruct:free", - "name": "Meta: Llama 3.3 70B Instruct (free)", - "description": "The Meta Llama 3.3 multilingual large language model (LLM) is a pretrained and instruction tuned generative model in 70B (text in/text out).", - "context_length": 131072, - "hugging_face_id": "meta-llama/Llama-3.3-70B-Instruct" - }, - { - "id": "meta-llama/llama-3.3-70b-instruct", - "name": "Meta: Llama 3.3 70B Instruct", - "description": "The Meta Llama 3.3 multilingual large language model (LLM) is a pretrained and instruction tuned generative model in 70B (text in/text out).", - "context_length": 131072, - "hugging_face_id": "meta-llama/Llama-3.3-70B-Instruct" - }, - { - "id": "amazon/nova-lite-v1", - "name": "Amazon: Nova Lite 1.0", - "description": "Amazon Nova Lite 1.0 is a very low-cost multimodal model from Amazon that focuses on fast processing of image, video, and text inputs to generate text output.", - "context_length": 300000, - "hugging_face_id": "" - }, - { - "id": "amazon/nova-micro-v1", - "name": "Amazon: Nova Micro 1.0", - "description": "Amazon Nova Micro 1.0 is a text-only model that delivers the lowest latency responses in the Amazon Nova family of models at a very low cost.", - "context_length": 128000, - "hugging_face_id": "" - }, - { - "id": "amazon/nova-pro-v1", - "name": "Amazon: Nova Pro 1.0", - "description": "Amazon Nova Pro 1.0 is a capable multimodal model from Amazon focused on providing a combination of accuracy, speed, and cost for a wide range of tasks.", - "context_length": 300000, - "hugging_face_id": "" - }, - { - "id": "openai/gpt-4o-2024-11-20", - "name": "OpenAI: GPT-4o (2024-11-20)", - "description": "The 2024-11-20 version of GPT-4o offers a leveled-up creative writing ability with more natural, engaging, and tailored writing to improve relevance & readability.", - "context_length": 128000, - "hugging_face_id": "" - }, - { - "id": "mistralai/mistral-large-2411", - "name": "Mistral Large 2411", - "description": "Mistral Large 2 2411 is an update of [Mistral Large 2](/mistralai/mistral-large) released together with [Pixtral Large 2411](/mistralai/pixtral-large-2411)\n\nIt provides a significant upgrade on the.", - "context_length": 131072, - "hugging_face_id": "" - }, - { - "id": "mistralai/mistral-large-2407", - "name": "Mistral Large 2407", - "description": "This is Mistral AI's flagship model, Mistral Large 2 (version mistral-large-2407).", - "context_length": 131072, - "hugging_face_id": "" - }, - { - "id": "mistralai/pixtral-large-2411", - "name": "Mistral: Pixtral Large 2411", - "description": "Pixtral Large is a 124B parameter, open-weight, multimodal model built on top of [Mistral Large 2](/mistralai/mistral-large-2411).", - "context_length": 131072, - "hugging_face_id": "" - }, - { - "id": "qwen/qwen-2.5-coder-32b-instruct:free", - "name": "Qwen2.5 Coder 32B Instruct (free)", - "description": "Qwen2.5-Coder is the latest series of Code-Specific Qwen large language models (formerly known as CodeQwen).", - "context_length": 32768, - "hugging_face_id": 
"Qwen/Qwen2.5-Coder-32B-Instruct" - }, - { - "id": "qwen/qwen-2.5-coder-32b-instruct", - "name": "Qwen2.5 Coder 32B Instruct", - "description": "Qwen2.5-Coder is the latest series of Code-Specific Qwen large language models (formerly known as CodeQwen).", - "context_length": 32768, - "hugging_face_id": "Qwen/Qwen2.5-Coder-32B-Instruct" - }, - { - "id": "raifle/sorcererlm-8x22b", - "name": "SorcererLM 8x22B", - "description": "SorcererLM is an advanced RP and storytelling model, built as a Low-rank 16-bit LoRA fine-tuned on [WizardLM-2 8x22B](/microsoft/wizardlm-2-8x22b).", - "context_length": 16000, - "hugging_face_id": "rAIfle/SorcererLM-8x22b-bf16" - }, - { - "id": "thedrummer/unslopnemo-12b", - "name": "TheDrummer: UnslopNemo 12B", - "description": "UnslopNemo v4.1 is the latest addition from the creator of Rocinante, designed for adventure writing and role-play scenarios.", - "context_length": 32768, - "hugging_face_id": "TheDrummer/UnslopNemo-12B-v4.1" - }, - { - "id": "anthropic/claude-3.5-haiku", - "name": "Anthropic: Claude 3.5 Haiku", - "description": "Claude 3.5 Haiku features offers enhanced capabilities in speed, coding accuracy, and tool use.", - "context_length": 200000, - "hugging_face_id": "" - }, - { - "id": "anthropic/claude-3.5-haiku-20241022", - "name": "Anthropic: Claude 3.5 Haiku (2024-10-22)", - "description": "Claude 3.5 Haiku features enhancements across all skill sets including coding, tool use, and reasoning.", - "context_length": 200000, - "hugging_face_id": "" - }, - { - "id": "anthracite-org/magnum-v4-72b", - "name": "Magnum v4 72B", - "description": "This is a series of models designed to replicate the prose quality of the Claude 3 models, specifically Sonnet(https://openrouter.ai/anthropic/claude-3.5-sonnet) and.", - "context_length": 16384, - "hugging_face_id": "anthracite-org/magnum-v4-72b" - }, - { - "id": "anthropic/claude-3.5-sonnet", - "name": "Anthropic: Claude 3.5 Sonnet", - "description": "New Claude 3.5 Sonnet delivers better-than-Opus capabilities, faster-than-Sonnet speeds, at the same Sonnet prices.", - "context_length": 200000, - "hugging_face_id": "" - }, - { - "id": "mistralai/ministral-3b", - "name": "Mistral: Ministral 3B", - "description": "Ministral 3B is a 3B parameter model optimized for on-device and edge computing.", - "context_length": 131072, - "hugging_face_id": "" - }, - { - "id": "mistralai/ministral-8b", - "name": "Mistral: Ministral 8B", - "description": "Ministral 8B is an 8B parameter model featuring a unique interleaved sliding-window attention pattern for faster, memory-efficient inference.", - "context_length": 131072, - "hugging_face_id": "" - }, - { - "id": "qwen/qwen-2.5-7b-instruct", - "name": "Qwen: Qwen2.5 7B Instruct", - "description": "Qwen2.5 7B is the latest series of Qwen large language models.", - "context_length": 32768, - "hugging_face_id": "Qwen/Qwen2.5-7B-Instruct" - }, - { - "id": "nvidia/llama-3.1-nemotron-70b-instruct", - "name": "NVIDIA: Llama 3.1 Nemotron 70B Instruct", - "description": "NVIDIA's Llama 3.1 Nemotron 70B is a language model designed for generating precise and useful responses.", - "context_length": 131072, - "hugging_face_id": "nvidia/Llama-3.1-Nemotron-70B-Instruct-HF" - }, - { - "id": "inflection/inflection-3-productivity", - "name": "Inflection: Inflection 3 Productivity", - "description": "Inflection 3 Productivity is optimized for following instructions.", - "context_length": 8000, - "hugging_face_id": "" - }, - { - "id": "inflection/inflection-3-pi", - "name": "Inflection: 
Inflection 3 Pi", - "description": "Inflection 3 Pi powers Inflection's [Pi](https://pi.ai) chatbot, including backstory, emotional intelligence, productivity, and safety.", - "context_length": 8000, - "hugging_face_id": "" - }, - { - "id": "thedrummer/rocinante-12b", - "name": "TheDrummer: Rocinante 12B", - "description": "Rocinante 12B is designed for engaging storytelling and rich prose.", - "context_length": 32768, - "hugging_face_id": "TheDrummer/Rocinante-12B-v1.1" - }, - { - "id": "meta-llama/llama-3.2-90b-vision-instruct", - "name": "Meta: Llama 3.2 90B Vision Instruct", - "description": "The Llama 90B Vision model is a top-tier, 90-billion-parameter multimodal model designed for the most challenging visual reasoning and language tasks.", - "context_length": 32768, - "hugging_face_id": "meta-llama/Llama-3.2-90B-Vision-Instruct" - }, - { - "id": "meta-llama/llama-3.2-1b-instruct", - "name": "Meta: Llama 3.2 1B Instruct", - "description": "Llama 3.2 1B is a 1-billion-parameter language model focused on efficiently performing natural language tasks, such as summarization, dialogue, and multilingual text analysis.", - "context_length": 60000, - "hugging_face_id": "meta-llama/Llama-3.2-1B-Instruct" - }, - { - "id": "meta-llama/llama-3.2-3b-instruct:free", - "name": "Meta: Llama 3.2 3B Instruct (free)", - "description": "Llama 3.2 3B is a 3-billion-parameter multilingual large language model, optimized for advanced natural language processing tasks like dialogue generation, reasoning, and summarization.", - "context_length": 131072, - "hugging_face_id": "meta-llama/Llama-3.2-3B-Instruct" - }, - { - "id": "meta-llama/llama-3.2-3b-instruct", - "name": "Meta: Llama 3.2 3B Instruct", - "description": "Llama 3.2 3B is a 3-billion-parameter multilingual large language model, optimized for advanced natural language processing tasks like dialogue generation, reasoning, and summarization.", - "context_length": 131072, - "hugging_face_id": "meta-llama/Llama-3.2-3B-Instruct" - }, - { - "id": "meta-llama/llama-3.2-11b-vision-instruct", - "name": "Meta: Llama 3.2 11B Vision Instruct", - "description": "Llama 3.2 11B Vision is a multimodal model with 11 billion parameters, designed to handle tasks combining visual and textual data.", - "context_length": 131072, - "hugging_face_id": "meta-llama/Llama-3.2-11B-Vision-Instruct" - }, - { - "id": "qwen/qwen-2.5-72b-instruct:free", - "name": "Qwen2.5 72B Instruct (free)", - "description": "Qwen2.5 72B is the latest series of Qwen large language models.", - "context_length": 32768, - "hugging_face_id": "Qwen/Qwen2.5-72B-Instruct" - }, - { - "id": "qwen/qwen-2.5-72b-instruct", - "name": "Qwen2.5 72B Instruct", - "description": "Qwen2.5 72B is the latest series of Qwen large language models.", - "context_length": 32768, - "hugging_face_id": "Qwen/Qwen2.5-72B-Instruct" - }, - { - "id": "neversleep/llama-3.1-lumimaid-8b", - "name": "NeverSleep: Lumimaid v0.2 8B", - "description": "Lumimaid v0.2 8B is a finetune of [Llama 3.1 8B](/models/meta-llama/llama-3.1-8b-instruct) with a \"HUGE step up dataset wise\" compared to Lumimaid v0.1.", - "context_length": 32768, - "hugging_face_id": "NeverSleep/Lumimaid-v0.2-8B" - }, - { - "id": "mistralai/pixtral-12b", - "name": "Mistral: Pixtral 12B", - "description": "The first multi-modal, text+image-to-text model from Mistral AI.", - "context_length": 32768, - "hugging_face_id": "mistralai/Pixtral-12B-2409" - }, - { - "id": "cohere/command-r-08-2024", - "name": "Cohere: Command R (08-2024)", - "description": 
"command-r-08-2024 is an update of the [Command R](/models/cohere/command-r) with improved performance for multilingual retrieval-augmented generation (RAG) and tool use.", - "context_length": 128000, - "hugging_face_id": "" - }, - { - "id": "cohere/command-r-plus-08-2024", - "name": "Cohere: Command R+ (08-2024)", - "description": "command-r-plus-08-2024 is an update of the [Command R+](/models/cohere/command-r-plus) with roughly 50% higher throughput and 25% lower latencies as compared to the previous Command R+ version, while.", - "context_length": 128000, - "hugging_face_id": "" - }, - { - "id": "sao10k/l3.1-euryale-70b", - "name": "Sao10K: Llama 3.1 Euryale 70B v2.2", - "description": "Euryale L3.1 70B v2.2 is a model focused on creative roleplay from [Sao10k](https://ko-fi.com/sao10k).", - "context_length": 32768, - "hugging_face_id": "Sao10K/L3.1-70B-Euryale-v2.2" - }, - { - "id": "qwen/qwen-2.5-vl-7b-instruct", - "name": "Qwen: Qwen2.5-VL 7B Instruct", - "description": "Qwen2.5 VL 7B is a multimodal LLM from the Qwen Team with the following key enhancements:\n\n- SoTA understanding of images of various resolution & ratio: Qwen2.5-VL achieves state-of-the-art.", - "context_length": 32768, - "hugging_face_id": "Qwen/Qwen2.5-VL-7B-Instruct" - }, - { - "id": "microsoft/phi-3.5-mini-128k-instruct", - "name": "Microsoft: Phi-3.5 Mini 128K Instruct", - "description": "Phi-3.5 models are lightweight, state-of-the-art open models.", - "context_length": 128000, - "hugging_face_id": "microsoft/Phi-3.5-mini-instruct" - }, - { - "id": "nousresearch/hermes-3-llama-3.1-70b", - "name": "Nous: Hermes 3 70B Instruct", - "description": "Hermes 3 is a generalist language model with many improvements over [Hermes 2](/models/nousresearch/nous-hermes-2-mistral-7b-dpo), including advanced agentic capabilities, much better roleplaying,.", - "context_length": 65536, - "hugging_face_id": "NousResearch/Hermes-3-Llama-3.1-70B" - }, - { - "id": "nousresearch/hermes-3-llama-3.1-405b:free", - "name": "Nous: Hermes 3 405B Instruct (free)", - "description": "Hermes 3 is a generalist language model with many improvements over Hermes 2, including advanced agentic capabilities, much better roleplaying, reasoning, multi-turn conversation, long context.", - "context_length": 131072, - "hugging_face_id": "NousResearch/Hermes-3-Llama-3.1-405B" - }, - { - "id": "nousresearch/hermes-3-llama-3.1-405b", - "name": "Nous: Hermes 3 405B Instruct", - "description": "Hermes 3 is a generalist language model with many improvements over Hermes 2, including advanced agentic capabilities, much better roleplaying, reasoning, multi-turn conversation, long context.", - "context_length": 131072, - "hugging_face_id": "NousResearch/Hermes-3-Llama-3.1-405B" - }, - { - "id": "openai/chatgpt-4o-latest", - "name": "OpenAI: ChatGPT-4o", - "description": "OpenAI ChatGPT 4o is continually updated by OpenAI to point to the current version of GPT-4o used by ChatGPT.", - "context_length": 128000, - "hugging_face_id": "" - }, - { - "id": "sao10k/l3-lunaris-8b", - "name": "Sao10K: Llama 3 8B Lunaris", - "description": "Lunaris 8B is a versatile generalist and roleplaying model based on Llama 3.", - "context_length": 8192, - "hugging_face_id": "Sao10K/L3-8B-Lunaris-v1" - }, - { - "id": "openai/gpt-4o-2024-08-06", - "name": "OpenAI: GPT-4o (2024-08-06)", - "description": "The 2024-08-06 version of GPT-4o offers improved performance in structured outputs, with the ability to supply a JSON schema in the respone_format.", - "context_length": 128000, - 
"hugging_face_id": "" - }, - { - "id": "meta-llama/llama-3.1-405b", - "name": "Meta: Llama 3.1 405B (base)", - "description": "Meta's latest class of model (Llama 3.1) launched with a variety of sizes & flavors.", - "context_length": 32768, - "hugging_face_id": "meta-llama/llama-3.1-405B" - }, - { - "id": "meta-llama/llama-3.1-70b-instruct", - "name": "Meta: Llama 3.1 70B Instruct", - "description": "Meta's latest class of model (Llama 3.1) launched with a variety of sizes & flavors.", - "context_length": 131072, - "hugging_face_id": "meta-llama/Meta-Llama-3.1-70B-Instruct" - }, - { - "id": "meta-llama/llama-3.1-405b-instruct", - "name": "Meta: Llama 3.1 405B Instruct", - "description": "The highly anticipated 400B class of Llama3 is here.", - "context_length": 130815, - "hugging_face_id": "meta-llama/Meta-Llama-3.1-405B-Instruct" - }, - { - "id": "meta-llama/llama-3.1-8b-instruct", - "name": "Meta: Llama 3.1 8B Instruct", - "description": "Meta's latest class of model (Llama 3.1) launched with a variety of sizes & flavors.", - "context_length": 131072, - "hugging_face_id": "meta-llama/Meta-Llama-3.1-8B-Instruct" - }, - { - "id": "mistralai/mistral-nemo:free", - "name": "Mistral: Mistral Nemo (free)", - "description": "A 12B parameter model with a 128k token context length built by Mistral in collaboration with NVIDIA.", - "context_length": 131072, - "hugging_face_id": "mistralai/Mistral-Nemo-Instruct-2407" - }, - { - "id": "mistralai/mistral-nemo", - "name": "Mistral: Mistral Nemo", - "description": "A 12B parameter model with a 128k token context length built by Mistral in collaboration with NVIDIA.", - "context_length": 131072, - "hugging_face_id": "mistralai/Mistral-Nemo-Instruct-2407" - }, - { - "id": "openai/gpt-4o-mini", - "name": "OpenAI: GPT-4o-mini", - "description": "GPT-4o mini is OpenAI's newest model after [GPT-4 Omni](/models/openai/gpt-4o), supporting both text and image inputs with text outputs.", - "context_length": 128000, - "hugging_face_id": "" - }, - { - "id": "openai/gpt-4o-mini-2024-07-18", - "name": "OpenAI: GPT-4o-mini (2024-07-18)", - "description": "GPT-4o mini is OpenAI's newest model after [GPT-4 Omni](/models/openai/gpt-4o), supporting both text and image inputs with text outputs.", - "context_length": 128000, - "hugging_face_id": "" - }, - { - "id": "google/gemma-2-27b-it", - "name": "Google: Gemma 2 27B", - "description": "Gemma 2 27B by Google is an open model built from the same research and technology used to create the [Gemini models](/models?q=gemini).", - "context_length": 8192, - "hugging_face_id": "google/gemma-2-27b-it" - }, - { - "id": "google/gemma-2-9b-it", - "name": "Google: Gemma 2 9B", - "description": "Gemma 2 9B by Google is an advanced, open-source language model that sets a new standard for efficiency and performance in its size class.", - "context_length": 8192, - "hugging_face_id": "google/gemma-2-9b-it" - }, - { - "id": "anthropic/claude-3.5-sonnet-20240620", - "name": "Anthropic: Claude 3.5 Sonnet (2024-06-20)", - "description": "Claude 3.5 Sonnet delivers better-than-Opus capabilities, faster-than-Sonnet speeds, at the same Sonnet prices.", - "context_length": 200000, - "hugging_face_id": "" - }, - { - "id": "sao10k/l3-euryale-70b", - "name": "Sao10k: Llama 3 Euryale 70B v2.1", - "description": "Euryale 70B v2.1 is a model focused on creative roleplay from [Sao10k](https://ko-fi.com/sao10k).", - "context_length": 8192, - "hugging_face_id": "Sao10K/L3-70B-Euryale-v2.1" - }, - { - "id": "mistralai/mistral-7b-instruct-v0.3", - "name": 
"Mistral: Mistral 7B Instruct v0.3", - "description": "A high-performing, industry-standard 7.3B parameter model, with optimizations for speed and context length.", - "context_length": 32768, - "hugging_face_id": "mistralai/Mistral-7B-Instruct-v0.3" - }, - { - "id": "mistralai/mistral-7b-instruct:free", - "name": "Mistral: Mistral 7B Instruct (free)", - "description": "A high-performing, industry-standard 7.3B parameter model, with optimizations for speed and context length.", - "context_length": 32768, - "hugging_face_id": "mistralai/Mistral-7B-Instruct-v0.3" - }, - { - "id": "mistralai/mistral-7b-instruct", - "name": "Mistral: Mistral 7B Instruct", - "description": "A high-performing, industry-standard 7.3B parameter model, with optimizations for speed and context length.", - "context_length": 32768, - "hugging_face_id": "mistralai/Mistral-7B-Instruct-v0.3" - }, - { - "id": "nousresearch/hermes-2-pro-llama-3-8b", - "name": "NousResearch: Hermes 2 Pro - Llama-3 8B", - "description": "Hermes 2 Pro is an upgraded, retrained version of Nous Hermes 2, consisting of an updated and cleaned version of the OpenHermes 2.5 Dataset, as well as a newly introduced Function Calling and JSON.", - "context_length": 8192, - "hugging_face_id": "NousResearch/Hermes-2-Pro-Llama-3-8B" - }, - { - "id": "microsoft/phi-3-mini-128k-instruct", - "name": "Microsoft: Phi-3 Mini 128K Instruct", - "description": "Phi-3 Mini is a powerful 3.8B parameter model designed for advanced language understanding, reasoning, and instruction following.", - "context_length": 128000, - "hugging_face_id": "microsoft/Phi-3-mini-128k-instruct" - }, - { - "id": "microsoft/phi-3-medium-128k-instruct", - "name": "Microsoft: Phi-3 Medium 128K Instruct", - "description": "Phi-3 128K Medium is a powerful 14-billion parameter model designed for advanced language understanding, reasoning, and instruction following.", - "context_length": 128000, - "hugging_face_id": "microsoft/Phi-3-medium-128k-instruct" - }, - { - "id": "openai/gpt-4o", - "name": "OpenAI: GPT-4o", - "description": "GPT-4o (\"o\" for \"omni\") is OpenAI's latest AI model, supporting both text and image inputs with text outputs.", - "context_length": 128000, - "hugging_face_id": "" - }, - { - "id": "openai/gpt-4o:extended", - "name": "OpenAI: GPT-4o (extended)", - "description": "GPT-4o (\"o\" for \"omni\") is OpenAI's latest AI model, supporting both text and image inputs with text outputs.", - "context_length": 128000, - "hugging_face_id": "" - }, - { - "id": "openai/gpt-4o-2024-05-13", - "name": "OpenAI: GPT-4o (2024-05-13)", - "description": "GPT-4o (\"o\" for \"omni\") is OpenAI's latest AI model, supporting both text and image inputs with text outputs.", - "context_length": 128000, - "hugging_face_id": "" - }, - { - "id": "meta-llama/llama-guard-2-8b", - "name": "Meta: LlamaGuard 2 8B", - "description": "This safeguard model has 8B parameters and is based on the Llama 3 family.", - "context_length": 8192, - "hugging_face_id": "meta-llama/Meta-Llama-Guard-2-8B" - }, - { - "id": "meta-llama/llama-3-8b-instruct", - "name": "Meta: Llama 3 8B Instruct", - "description": "Meta's latest class of model (Llama 3) launched with a variety of sizes & flavors.", - "context_length": 8192, - "hugging_face_id": "meta-llama/Meta-Llama-3-8B-Instruct" - }, - { - "id": "meta-llama/llama-3-70b-instruct", - "name": "Meta: Llama 3 70B Instruct", - "description": "Meta's latest class of model (Llama 3) launched with a variety of sizes & flavors.", - "context_length": 8192, - "hugging_face_id": 
"meta-llama/Meta-Llama-3-70B-Instruct" - }, - { - "id": "mistralai/mixtral-8x22b-instruct", - "name": "Mistral: Mixtral 8x22B Instruct", - "description": "Mistral's official instruct fine-tuned version of [Mixtral 8x22B](/models/mistralai/mixtral-8x22b).", - "context_length": 65536, - "hugging_face_id": "mistralai/Mixtral-8x22B-Instruct-v0.1" - }, - { - "id": "microsoft/wizardlm-2-8x22b", - "name": "WizardLM-2 8x22B", - "description": "WizardLM-2 8x22B is Microsoft AI's most advanced Wizard model.", - "context_length": 65536, - "hugging_face_id": "microsoft/WizardLM-2-8x22B" - }, - { - "id": "openai/gpt-4-turbo", - "name": "OpenAI: GPT-4 Turbo", - "description": "The latest GPT-4 Turbo model with vision capabilities.", - "context_length": 128000, - "hugging_face_id": "" - }, - { - "id": "anthropic/claude-3-haiku", - "name": "Anthropic: Claude 3 Haiku", - "description": "Claude 3 Haiku is Anthropic's fastest and most compact model for\nnear-instant responsiveness.", - "context_length": 200000, - "hugging_face_id": "" - }, - { - "id": "anthropic/claude-3-opus", - "name": "Anthropic: Claude 3 Opus", - "description": "Claude 3 Opus is Anthropic's most powerful model for highly complex tasks.", - "context_length": 200000, - "hugging_face_id": "" - }, - { - "id": "mistralai/mistral-large", - "name": "Mistral Large", - "description": "This is Mistral AI's flagship model, Mistral Large 2 (version `mistral-large-2407`).", - "context_length": 128000, - "hugging_face_id": "" - }, - { - "id": "openai/gpt-4-turbo-preview", - "name": "OpenAI: GPT-4 Turbo Preview", - "description": "The preview GPT-4 model with improved instruction following, JSON mode, reproducible outputs, parallel function calling, and more.", - "context_length": 128000, - "hugging_face_id": "" - }, - { - "id": "openai/gpt-3.5-turbo-0613", - "name": "OpenAI: GPT-3.5 Turbo (older v0613)", - "description": "GPT-3.5 Turbo is OpenAI's fastest model.", - "context_length": 4095, - "hugging_face_id": "" - }, - { - "id": "mistralai/mistral-small", - "name": "Mistral Small", - "description": "With 22 billion parameters, Mistral Small v24.09 offers a convenient mid-point between (Mistral NeMo 12B)[/mistralai/mistral-nemo] and (Mistral Large 2)[/mistralai/mistral-large], providing a.", - "context_length": 32768, - "hugging_face_id": "" - }, - { - "id": "mistralai/mistral-tiny", - "name": "Mistral Tiny", - "description": "Note: This model is being deprecated.", - "context_length": 32768, - "hugging_face_id": "" - }, - { - "id": "mistralai/mistral-7b-instruct-v0.2", - "name": "Mistral: Mistral 7B Instruct v0.2", - "description": "A high-performing, industry-standard 7.3B parameter model, with optimizations for speed and context length.", - "context_length": 32768, - "hugging_face_id": "mistralai/Mistral-7B-Instruct-v0.2" - }, - { - "id": "mistralai/mixtral-8x7b-instruct", - "name": "Mistral: Mixtral 8x7B Instruct", - "description": "Mixtral 8x7B Instruct is a pretrained generative Sparse Mixture of Experts, by Mistral AI, for chat and instruction use.", - "context_length": 32768, - "hugging_face_id": "mistralai/Mixtral-8x7B-Instruct-v0.1" - }, - { - "id": "neversleep/noromaid-20b", - "name": "Noromaid 20B", - "description": "A collab between IkariDev and Undi.", - "context_length": 4096, - "hugging_face_id": "NeverSleep/Noromaid-20b-v0.1.1" - }, - { - "id": "alpindale/goliath-120b", - "name": "Goliath 120B", - "description": "A large LLM created by combining two fine-tuned Llama 70B models into one 120B model.", - "context_length": 6144, - 
"hugging_face_id": "alpindale/goliath-120b" - }, - { - "id": "openrouter/auto", - "name": "Auto Router", - "description": "Your prompt will be processed by a meta-model and routed to one of dozens of models (see below), optimizing for the best possible output.", - "context_length": 2000000, - "hugging_face_id": "" - }, - { - "id": "openai/gpt-4-1106-preview", - "name": "OpenAI: GPT-4 Turbo (older v1106)", - "description": "The latest GPT-4 Turbo model with vision capabilities.", - "context_length": 128000, - "hugging_face_id": "" - }, - { - "id": "mistralai/mistral-7b-instruct-v0.1", - "name": "Mistral: Mistral 7B Instruct v0.1", - "description": "A 7.3B parameter model that outperforms Llama 2 13B on all benchmarks, with optimizations for speed and context length.", - "context_length": 2824, - "hugging_face_id": "mistralai/Mistral-7B-Instruct-v0.1" - }, - { - "id": "openai/gpt-3.5-turbo-instruct", - "name": "OpenAI: GPT-3.5 Turbo Instruct", - "description": "This model is a variant of GPT-3.5 Turbo tuned for instructional prompts and omitting chat-related optimizations.", - "context_length": 4095, - "hugging_face_id": "" - }, - { - "id": "openai/gpt-3.5-turbo-16k", - "name": "OpenAI: GPT-3.5 Turbo 16k", - "description": "This model offers four times the context length of gpt-3.5-turbo, allowing it to support approximately 20 pages of text in a single request at a higher cost.", - "context_length": 16385, - "hugging_face_id": "" - }, - { - "id": "mancer/weaver", - "name": "Mancer: Weaver (alpha)", - "description": "An attempt to recreate Claude-style verbosity, but don't expect the same level of coherence or memory.", - "context_length": 8000, - "hugging_face_id": "" - }, - { - "id": "undi95/remm-slerp-l2-13b", - "name": "ReMM SLERP 13B", - "description": "A recreation trial of the original MythoMax-L2-B13 but with updated models.", - "context_length": 6144, - "hugging_face_id": "Undi95/ReMM-SLERP-L2-13B" - }, - { - "id": "gryphe/mythomax-l2-13b", - "name": "MythoMax 13B", - "description": "One of the highest performing and most popular fine-tunes of Llama 2 13B, with rich descriptions and roleplay.", - "context_length": 4096, - "hugging_face_id": "Gryphe/MythoMax-L2-13b" - }, - { - "id": "openai/gpt-4-0314", - "name": "OpenAI: GPT-4 (older v0314)", - "description": "GPT-4-0314 is the first version of GPT-4 released, with a context length of 8,192 tokens, and was supported until June 14.", - "context_length": 8191, - "hugging_face_id": "" - }, - { - "id": "openai/gpt-4", - "name": "OpenAI: GPT-4", - "description": "OpenAI's flagship model, GPT-4 is a large-scale multimodal language model capable of solving difficult problems with greater accuracy than previous models due to its broader general knowledge and.", - "context_length": 8191, - "hugging_face_id": "" - }, - { - "id": "openai/gpt-3.5-turbo", - "name": "OpenAI: GPT-3.5 Turbo", - "description": "GPT-3.5 Turbo is OpenAI's fastest model.", - "context_length": 16385, - "hugging_face_id": "" - } - ] -} diff --git a/gui/src/pages/AddNewModel/configs/providers.ts b/gui/src/pages/AddNewModel/configs/providers.ts index 4d2078fb24a..a76651fe6d0 100644 --- a/gui/src/pages/AddNewModel/configs/providers.ts +++ b/gui/src/pages/AddNewModel/configs/providers.ts @@ -3,7 +3,7 @@ import { ModelProviderTags } from "../../../components/modelSelection/utils"; import { completionParamsInputs } from "./completionParamsInputs"; import type { ModelPackage } from "./models"; import { models } from "./models"; -import { openRouterModelsList } from 
"./openRouterModel"; +import { getOpenRouterModelsList } from "./openRouterModel"; export interface InputDescriptor { inputType: HTMLInputTypeAttribute; @@ -41,6 +41,26 @@ const openSourceModels = Object.values(models).filter( ({ isOpenSource }) => isOpenSource, ); +// Initialize OpenRouter models placeholder +let openRouterModelsList: ModelPackage[] = []; + +/** + * Initialize OpenRouter models by fetching from the API + * This should be called once when the app loads + */ +export async function initializeOpenRouterModels() { + try { + openRouterModelsList = await getOpenRouterModelsList(); + // Update the providers object with the fetched models + if (providers.openrouter) { + providers.openrouter.packages = openRouterModelsList; + } + } catch (error) { + console.error("Failed to initialize OpenRouter models:", error); + openRouterModelsList = []; + } +} + export const apiBaseInput: InputDescriptor = { inputType: "text", key: "apiBase", From 3ca17495f3ce771b08d466ef95ea93e71b21a2de Mon Sep 17 00:00:00 2001 From: Parthasarathy Date: Wed, 26 Nov 2025 23:15:03 +0530 Subject: [PATCH 8/8] feat: enhance OpenRouter model initialization with loading placeholder and effect hook --- gui/src/forms/AddModelForm.tsx | 15 +++++----- .../pages/AddNewModel/configs/providers.ts | 29 ++++++++++++++----- 2 files changed, 29 insertions(+), 15 deletions(-) diff --git a/gui/src/forms/AddModelForm.tsx b/gui/src/forms/AddModelForm.tsx index 239e7e78488..ae28cd07423 100644 --- a/gui/src/forms/AddModelForm.tsx +++ b/gui/src/forms/AddModelForm.tsx @@ -41,8 +41,10 @@ export function AddModelForm({ const formMethods = useForm(); const ideMessenger = useContext(IdeMessengerContext); - // Initialize OpenRouter models from API - void initializeOpenRouterModels(); + // Initialize OpenRouter models from API on component mount + useEffect(() => { + void initializeOpenRouterModels(); + }, []); const popularProviderTitles = [ providers["openai"]?.title || "", @@ -68,11 +70,10 @@ export function AddModelForm({ .filter((provider) => !popularProviderTitles.includes(provider.title)) .sort((a, b) => a.title.localeCompare(b.title)); - const selectedProviderApiKeyUrl = selectedModel.params.model.startsWith( - "codestral", - ) - ? CODESTRAL_URL - : selectedProvider.apiKeyUrl; + const selectedProviderApiKeyUrl = + selectedModel && selectedModel.params.model.startsWith("codestral") + ? 
CODESTRAL_URL + : selectedProvider.apiKeyUrl; function isDisabled() { if (selectedProvider.downloadUrl) { diff --git a/gui/src/pages/AddNewModel/configs/providers.ts index a76651fe6d0..273bf97c549 100644 --- a/gui/src/pages/AddNewModel/configs/providers.ts +++ b/gui/src/pages/AddNewModel/configs/providers.ts @@ -41,23 +41,36 @@ const openSourceModels = Object.values(models).filter( ({ isOpenSource }) => isOpenSource, ); -// Initialize OpenRouter models placeholder -let openRouterModelsList: ModelPackage[] = []; +// Initialize the OpenRouter models list with a loading placeholder +const OPENROUTER_LOADING_PLACEHOLDER: ModelPackage = { + title: "Loading models...", + description: "Fetching available models from OpenRouter", + params: { + model: "placeholder", + contextLength: 0, + }, + isOpenSource: false, +}; + +let openRouterModelsList: ModelPackage[] = [OPENROUTER_LOADING_PLACEHOLDER]; /** * Initialize OpenRouter models by fetching from the API - * This should be called once when the app loads + * This should be called once when the component mounts */ export async function initializeOpenRouterModels() { try { - openRouterModelsList = await getOpenRouterModelsList(); - // Update the providers object with the fetched models - if (providers.openrouter) { - providers.openrouter.packages = openRouterModelsList; + const fetchedModels = await getOpenRouterModelsList(); + if (fetchedModels.length > 0) { + openRouterModelsList = fetchedModels; + // Update the providers object with the fetched models + if (providers.openrouter) { + providers.openrouter.packages = openRouterModelsList; + } } } catch (error) { console.error("Failed to initialize OpenRouter models:", error); - openRouterModelsList = []; + // Keep placeholder on error so the UI doesn't break } }
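
Editor's note: the getOpenRouterModelsList helper imported above lives in gui/src/pages/AddNewModel/configs/openRouterModel.ts, which this series adds but whose body is not shown in these hunks. Below is a minimal sketch of what it might look like, assuming OpenRouter's public GET https://openrouter.ai/api/v1/models endpoint, whose entries carry the same id, name, description, and context_length fields recorded in openRouterModels.json; the actual file in the patch may differ, for example by falling back to the bundled JSON when the request fails.

import type { ModelPackage } from "./models";

// Shape of one catalog entry returned by https://openrouter.ai/api/v1/models,
// mirroring the fields captured in openRouterModels.json.
interface OpenRouterModelEntry {
  id: string;
  name: string;
  description: string;
  context_length: number;
}

// Hypothetical sketch: fetch the live catalog and map each entry onto the
// ModelPackage shape consumed by the Add Model form.
export async function getOpenRouterModelsList(): Promise<ModelPackage[]> {
  const response = await fetch("https://openrouter.ai/api/v1/models");
  if (!response.ok) {
    throw new Error(`OpenRouter models request failed: ${response.status}`);
  }
  const { data } = (await response.json()) as { data: OpenRouterModelEntry[] };
  return data.map((entry) => ({
    title: entry.name,
    description: entry.description,
    params: {
      model: entry.id,
      contextLength: entry.context_length,
    },
    // Simplification: open-source status cannot be derived from the catalog
    // response alone, so fetched models are not marked open source here.
    isOpenSource: false,
  }));
}

Because initializeOpenRouterModels only replaces the placeholder when the fetched list is non-empty, a failed or empty fetch leaves the "Loading models..." entry in place rather than rendering an empty OpenRouter provider.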