|
325 | 325 | }, |
326 | 326 | { |
327 | 327 | "cell_type": "code", |
328 | | - "execution_count": null, |
| 328 | + "execution_count": 38, |
329 | 329 | "metadata": {}, |
330 | 330 | "outputs": [ |
331 | 331 | { |
|
335 | 335 | "Model 'NucleiSegmentationBoundaryModel' requires 1 input(s) with the following features:\n", |
336 | 336 | "\n", |
337 | 337 | "input 'input0' with axes:\n", |
338 | | - "[BatchAxis(id='batch', description='', type='batch', size=None),\n", |
339 | | - " ChannelAxis(id='channel', description='', type='channel', channel_names=['channel0']),\n", |
340 | | - " SpaceInputAxis(size=ParameterizedSize(min=64, step=16), id='y', description='', type='space', unit=None, scale=1.0, concatenable=False),\n", |
341 | | - " SpaceInputAxis(size=ParameterizedSize(min=64, step=16), id='x', description='', type='space', unit=None, scale=1.0, concatenable=False)]\n", |
| 338 | + "[('batch', None),\n", |
| 339 | + " ('channel', 1),\n", |
| 340 | + " ('y', ParameterizedSize(min=64, step=16)),\n", |
| 341 | + " ('x', ParameterizedSize(min=64, step=16))]\n", |
342 | 342 | "Data description: type='float32' range=(None, None) unit='arbitrary unit' scale=1.0 offset=None\n", |
343 | | - "Test tensor available at: https://uk1s3.embassy.ebi.ac.uk/public-datasets/bioimage.io/affable-shark/1.2/files/test_input_0.npy\n", |
344 | | - "This input is preprocessed with: \n", |
345 | | - "id='ensure_dtype' kwargs=EnsureDtypeKwargs(dtype='float32')\n", |
346 | | - "id='zero_mean_unit_variance' kwargs=ZeroMeanUnitVarianceKwargs(axes=['channel', 'y', 'x'], eps=1e-06)\n", |
347 | | - "id='ensure_dtype' kwargs=EnsureDtypeKwargs(dtype='float32')\n" |
| 343 | + "Test tensor available at: https://uk1s3.embassy.ebi.ac.uk/public-datasets/bioimage.io/affable-shark/1.2/files/test_input_0.npy\n" |
348 | 344 | ] |
349 | 345 | } |
350 | 346 | ], |
|
355 | 351 | ")\n", |
356 | 352 | "for ipt in model.inputs:\n", |
357 | 353 | " print(f\"\\ninput '{ipt.id}' with axes:\")\n", |
358 | | - " pprint(ipt.axes)\n", |
| 354 | + " pprint([(a.id, a.size) for a in ipt.axes])\n", |
359 | 355 | " print(f\"Data description: {ipt.data}\")\n", |
360 | | - " print(f\"Test tensor available at: {ipt.test_tensor.source.absolute()}\")\n", |
361 | | - " if len(ipt.preprocessing) > 1:\n", |
362 | | - " print(\"This input is preprocessed with: \")\n", |
363 | | - " for p in ipt.preprocessing:\n", |
364 | | - " print(p)" |
| 356 | + " print(f\"Test tensor available at: {ipt.test_tensor.source.absolute()}\")" |
| 357 | + ] |
| 358 | + }, |
| 359 | + { |
| 360 | + "cell_type": "markdown", |
| 361 | + "metadata": {}, |
| 362 | + "source": [ |
| 363 | + "**Note** A batch size of None is a convention that simply means there is no restriction on the batch size; it can be any positive integer." |
| 364 | + ] |
| 365 | + }, |
| 366 | + { |
| 367 | + "cell_type": "markdown", |
| 368 | + "metadata": {}, |
| 369 | + "source": [ |
| 370 | + "**Note** In the above printout we can also see the axis order that the model expects. In some cases it may be important to explicitly set this order and ensure that your input data matches it. As mentioned before, you can set the axes order by using a `bioimageio.core.Tensor`/`xarray.DataArray` instance." |
365 | 371 | ] |
366 | 372 | }, |
367 | 373 | { |
368 | 374 | "cell_type": "code", |
369 | | - "execution_count": null, |
| 375 | + "execution_count": 39, |
| 376 | + "metadata": {}, |
| 377 | + "outputs": [ |
| 378 | + { |
| 379 | + "name": "stdout", |
| 380 | + "output_type": "stream", |
| 381 | + "text": [ |
| 382 | + "tensor shape: Frozen({'batch': 1, 'channel': 1, 'y': 256, 'x': 256})\n" |
| 383 | + ] |
| 384 | + } |
| 385 | + ], |
| 386 | + "source": [ |
| 387 | + "input_tensor = Tensor.from_numpy(input_image, dims=model.inputs[0].axes)\n", |
| 388 | + "\n", |
| 389 | + "# print the axis annotations ('dims') and the shape of the input array\n", |
| 390 | + "print(f\"tensor shape: {input_tensor.tagged_shape}\")" |
| 391 | + ] |
| 392 | + }, |
| 393 | + { |
| 394 | + "cell_type": "code", |
| 395 | + "execution_count": 40, |
370 | 396 | "metadata": {}, |
371 | 397 | "outputs": [ |
372 | 398 | { |
|
391 | 417 | } |
392 | 418 | ], |
393 | 419 | "source": [ |
394 | | - "input = {model.inputs[0].id: input_image}\n", |
| 420 | + "input = {model.inputs[0].id: input_tensor}\n", |
395 | 421 | "\n", |
396 | 422 | "prediction: Sample = predict(model=model, inputs=input)\n", |
397 | 423 | "\n", |
|
0 commit comments