
Commit 89c6781

docs: documentation, test, and some model changes
1 parent 55aec8c

10 files changed: 259 additions, 21 deletions


assistant/v1.ts

Lines changed: 30 additions & 2 deletions
@@ -5336,8 +5336,9 @@ namespace AssistantV1 {
     message_to_human_agent?: string;
     /** The text of the search query. This can be either a natural-language query or a query that uses the Discovery
      * query language syntax, depending on the value of the **query_type** property. For more information, see the
-     * [Discovery service documentation](https://cloud.ibm.com/docs/discovery?topic=discovery-query-operators#query-operators).
-     * Required when **response_type**=`search_skill`.
+     * [Discovery service
+     * documentation](https://cloud.ibm.com/docs/discovery?topic=discovery-query-operators#query-operators). Required
+     * when **response_type**=`search_skill`.
      */
     query?: string;
     /** The type of the search query. Required when **response_type**=`search_skill`. */
@@ -5662,6 +5663,25 @@ namespace AssistantV1 {
   export interface MessageInput {
     /** The text of the user input. This string cannot contain carriage return, newline, or tab characters. */
     text?: string;
+    /** Whether to use spelling correction when processing the input. This property overrides the value of the
+     * **spelling_suggestions** property in the workspace settings.
+     */
+    spelling_suggestions?: boolean;
+    /** Whether to use autocorrection when processing the input. If spelling correction is used and this property is
+     * `false`, any suggested corrections are returned in the **suggested_text** property of the message response. If
+     * this property is `true`, any corrections are automatically applied to the user input, and the original text is
+     * returned in the **original_text** property of the message response. This property overrides the value of the
+     * **spelling_auto_correct** property in the workspace settings.
+     */
+    spelling_auto_correct?: boolean;
+    /** Any suggested corrections of the input text. This property is returned only if spelling correction is
+     * enabled and autocorrection is disabled.
+     */
+    suggested_text?: string;
+    /** The original user input text. This property is returned only if autocorrection is enabled and the user input
+     * was corrected.
+     */
+    original_text?: string;
     /** MessageInput accepts additional properties. */
     [propName: string]: any;
   }
@@ -6073,6 +6093,14 @@ namespace AssistantV1 {
     disambiguation?: WorkspaceSystemSettingsDisambiguation;
     /** For internal use only. */
     human_agent_assist?: JsonObject;
+    /** Whether spelling correction is enabled for the workspace. */
+    spelling_suggestions?: boolean;
+    /** Whether autocorrection is enabled for the workspace. If spelling correction is enabled and this property is
+     * `false`, any suggested corrections are returned in the **suggested_text** property of the message response. If
+     * this property is `true`, any corrections are automatically applied to the user input, and the original text is
+     * returned in the **original_text** property of the message response.
+     */
+    spelling_auto_correct?: boolean;
     /** Workspace settings related to the behavior of system entities. */
     system_entities?: WorkspaceSystemSettingsSystemEntities;
     /** Workspace settings related to detection of irrelevant input. */
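
The new spelling properties are plain optional fields, so callers can set them per message without touching workspace settings. A minimal usage sketch, assuming the standard ibm-watson Node.js client; the API key, service URL, workspace ID, and version date are placeholders, not values from this commit:

const AssistantV1 = require('ibm-watson/assistant/v1');
const { IamAuthenticator } = require('ibm-watson/auth');

const assistant = new AssistantV1({
  version: '2020-02-05', // assumed version date that supports spelling correction
  authenticator: new IamAuthenticator({ apikey: 'YOUR_APIKEY' }),
  serviceUrl: 'YOUR_SERVICE_URL',
});

assistant
  .message({
    workspaceId: 'YOUR_WORKSPACE_ID',
    input: {
      text: 'Helo wrld',
      spelling_suggestions: true,
      spelling_auto_correct: true, // overrides the workspace-level spelling_auto_correct setting
    },
  })
  .then(({ result }) => {
    // Per the docstrings above, the corrected input is applied and the original
    // text comes back in result.input.original_text when autocorrection fires.
    console.log(JSON.stringify(result, null, 2));
  })
  .catch(err => console.error(err));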

discovery/v2.ts

Lines changed: 6 additions & 0 deletions
@@ -689,6 +689,9 @@ class DiscoveryV2 extends BaseService {
    * **Note:** This operation only works on collections created to accept direct file uploads. It cannot be used to
    * modify a collection that connects to an external source such as Microsoft SharePoint.
    *
+   * **Note:** If an uploaded document is segmented, all segments will be overwritten, even if the updated version of
+   * the document has fewer segments.
+   *
    * @param {Object} params - The parameters to send to the service.
    * @param {string} params.projectId - The ID of the project. This information can be found from the deploy page of the
    * Discovery administrative tooling.
@@ -785,6 +788,9 @@ class DiscoveryV2 extends BaseService {
    * **Note:** This operation only works on collections created to accept direct file uploads. It cannot be used to
    * modify a collection that connects to an external source such as Microsoft SharePoint.
    *
+   * **Note:** Segments of an uploaded document cannot be deleted individually. Delete all segments by deleting using
+   * the `parent_document_id` of a segment result.
+   *
    * @param {Object} params - The parameters to send to the service.
    * @param {string} params.projectId - The ID of the project. This information can be found from the deploy page of the
    * Discovery administrative tooling.
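
In practice the second note means a segmented upload is removed through its parent document rather than segment by segment. A sketch under that assumption; the project and collection IDs, the version date, and the variable holding the parent ID are placeholders:

const DiscoveryV2 = require('ibm-watson/discovery/v2');
const { IamAuthenticator } = require('ibm-watson/auth');

const discovery = new DiscoveryV2({
  version: '2019-11-22', // assumed version date
  authenticator: new IamAuthenticator({ apikey: 'YOUR_APIKEY' }),
  serviceUrl: 'YOUR_SERVICE_URL',
});

// A segment's parent_document_id, e.g. copied from a query result.
const parentDocumentId = 'PARENT_DOCUMENT_ID';

// Deleting by the parent document ID removes every segment of the uploaded
// file; per the note above, segments cannot be deleted individually.
discovery
  .deleteDocument({
    projectId: 'YOUR_PROJECT_ID',
    collectionId: 'YOUR_COLLECTION_ID',
    documentId: parentDocumentId,
  })
  .then(({ result }) => console.log(result))
  .catch(err => console.error(err));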

natural-language-understanding/v1.ts

Lines changed: 4 additions & 0 deletions
@@ -744,6 +744,10 @@ namespace NaturalLanguageUnderstandingV1 {
     /** ID of the Watson Knowledge Studio workspace that deployed this model to Natural Language Understanding. */
     workspace_id?: string;
     /** The model version, if it was manually provided in Watson Knowledge Studio. */
+    model_version?: string;
+    /** (Deprecated — use `model_version`) The model version, if it was manually provided in Watson Knowledge
+     * Studio.
+     */
     version?: string;
     /** The description of the version, if it was manually provided in Watson Knowledge Studio. */
     version_description?: string;
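
Because `version` remains only for backward compatibility, client code that reads custom-model metadata can prefer `model_version` and fall back to the older field. A small sketch against `listModels`; the credentials and version date are placeholders, and the fallback pattern is a suggestion rather than anything mandated by this commit:

const NaturalLanguageUnderstandingV1 = require('ibm-watson/natural-language-understanding/v1');
const { IamAuthenticator } = require('ibm-watson/auth');

const nlu = new NaturalLanguageUnderstandingV1({
  version: '2019-07-12', // assumed version date
  authenticator: new IamAuthenticator({ apikey: 'YOUR_APIKEY' }),
  serviceUrl: 'YOUR_SERVICE_URL',
});

nlu
  .listModels()
  .then(({ result }) => {
    (result.models || []).forEach(model => {
      // Prefer the new field; fall back to the deprecated one for older payloads.
      const modelVersion = model.model_version || model.version;
      console.log(`${model.model_id}: ${modelVersion}`);
    });
  })
  .catch(err => console.error(err));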

secrets.tar.enc

0 Bytes
Binary file not shown.

test/integration/assistant.v2.test.js

Lines changed: 14 additions & 0 deletions
@@ -84,6 +84,20 @@ describe('assistant v2 integration', () => {
     });
   });
 
+  it('should messageStateless', done => {
+    const params = {
+      assistantId,
+      input: { text: 'Hello' },
+    };
+
+    assistant.messageStateless(params, (err, res) => {
+      expect(err).toBeNull();
+      const { result } = res || {};
+      expect(result).toBeDefined();
+      done();
+    });
+  });
+
   it('should deleteSession', done => {
     if (!sessionId) {
       // We cannot run this test when session creation failed.
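
The integration test only asserts that a result comes back. In actual use the point of `messageStateless` is that no server-side session exists, so the caller passes the context from each response into the next request. A hedged sketch; the assistant ID, credentials, and version date are placeholders:

const AssistantV2 = require('ibm-watson/assistant/v2');
const { IamAuthenticator } = require('ibm-watson/auth');

const assistant = new AssistantV2({
  version: '2020-02-05', // assumed version date
  authenticator: new IamAuthenticator({ apikey: 'YOUR_APIKEY' }),
  serviceUrl: 'YOUR_SERVICE_URL',
});

async function chat() {
  // First turn: no context yet.
  const first = await assistant.messageStateless({
    assistantId: 'YOUR_ASSISTANT_ID',
    input: { text: 'Hello' },
  });

  // Later turns: send back the context returned by the previous response,
  // since no session holds conversation state on the service side.
  const second = await assistant.messageStateless({
    assistantId: 'YOUR_ASSISTANT_ID',
    input: { text: 'What can you do?' },
    context: first.result.context,
  });

  console.log(JSON.stringify(second.result.output, null, 2));
}

chat().catch(err => console.error(err));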

test/integration/visual-recognition.v4.test.js

Lines changed: 23 additions & 2 deletions
@@ -29,7 +29,7 @@ describe('visual recognition v4 integration', () => {
   const visualRecognition = new VisualRecognitionV4(options);
 
   // testing collections
-  const { giraffesCollectionId } = options;
+  const { testCollectionId } = options;
 
   let collectionId;
   let imageId;
@@ -40,7 +40,7 @@ describe('visual recognition v4 integration', () => {
   describe('analysis', () => {
     test('analyze', async done => {
       const params = {
-        collectionIds: [giraffesCollectionId],
+        collectionIds: [testCollectionId],
         features: 'objects',
         imagesFile: [{ data: fs.createReadStream(__dirname + '/../resources/potato.jpeg') }],
       };
@@ -58,6 +58,27 @@ describe('visual recognition v4 integration', () => {
       expect(result.images).toBeDefined();
       done();
     });
+
+    test('getModelFile', async done => {
+      const params = {
+        collectionId: testCollectionId,
+        feature: 'objects',
+        modelFormat: 'rscnn',
+      };
+
+      let res;
+      try {
+        res = await visualRecognition.getModelFile(params);
+      } catch (err) {
+        return done(err);
+      }
+
+      expect(res).toBeDefined();
+      const { result } = res || {};
+      expect(result).toBeDefined();
+      expect(isStream(result)).toBe(true);
+      done();
+    });
   });
 
   describe('collections', () => {
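
`getModelFile` resolves with a binary stream (the unit tests below also assert `responseType` is `'stream'`), so a typical consumer pipes the result to disk. A sketch with placeholder credentials; the `objects` feature and `rscnn` format mirror the integration test above, and the output filename is arbitrary:

const fs = require('fs');
const VisualRecognitionV4 = require('ibm-watson/visual-recognition/v4');
const { IamAuthenticator } = require('ibm-watson/auth');

const visualRecognition = new VisualRecognitionV4({
  version: '2019-02-11', // assumed version date
  authenticator: new IamAuthenticator({ apikey: 'YOUR_APIKEY' }),
  serviceUrl: 'YOUR_SERVICE_URL',
});

visualRecognition
  .getModelFile({
    collectionId: 'YOUR_COLLECTION_ID',
    feature: 'objects',
    modelFormat: 'rscnn',
  })
  .then(({ result }) => {
    // result is a readable stream containing the exported model file.
    result.pipe(fs.createWriteStream('./object-detection-model.zip'));
  })
  .catch(err => console.error(err));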

test/unit/assistant.v2.test.js

Lines changed: 80 additions & 0 deletions
@@ -279,4 +279,84 @@ describe('AssistantV2', () => {
       });
     });
   });
+  describe('messageStateless', () => {
+    describe('positive tests', () => {
+      test('should pass the right params to createRequest', () => {
+        // parameters
+        const assistantId = 'fake_assistantId';
+        const input = 'fake_input';
+        const context = 'fake_context';
+        const params = {
+          assistantId,
+          input,
+          context,
+        };
+
+        const messageStatelessResult = assistant.messageStateless(params);
+
+        // all methods should return a Promise
+        expectToBePromise(messageStatelessResult);
+
+        // assert that create request was called
+        expect(createRequestMock).toHaveBeenCalledTimes(1);
+
+        const options = getOptions(createRequestMock);
+
+        checkUrlAndMethod(options, '/v2/assistants/{assistant_id}/message', 'POST');
+        const expectedAccept = 'application/json';
+        const expectedContentType = 'application/json';
+        checkMediaHeaders(createRequestMock, expectedAccept, expectedContentType);
+        expect(options.body['input']).toEqual(input);
+        expect(options.body['context']).toEqual(context);
+        expect(options.path['assistant_id']).toEqual(assistantId);
+      });
+
+      test('should prioritize user-given headers', () => {
+        // parameters
+        const assistantId = 'fake_assistantId';
+        const userAccept = 'fake/header';
+        const userContentType = 'fake/header';
+        const params = {
+          assistantId,
+          headers: {
+            Accept: userAccept,
+            'Content-Type': userContentType,
+          },
+        };
+
+        assistant.messageStateless(params);
+        checkMediaHeaders(createRequestMock, userAccept, userContentType);
+      });
+    });
+
+    describe('negative tests', () => {
+      test('should enforce required parameters', async done => {
+        // required parameters for this method
+        const requiredParams = ['assistantId'];
+
+        let err;
+        try {
+          await assistant.messageStateless({});
+        } catch (e) {
+          err = e;
+        }
+
+        expect(err.message).toMatch(/Missing required parameters/);
+        done();
+      });
+
+      test('should reject promise when required params are not given', done => {
+        // required parameters for this method
+        const requiredParams = ['assistantId'];
+
+        const messageStatelessPromise = assistant.messageStateless();
+        expectToBePromise(messageStatelessPromise);
+
+        messageStatelessPromise.catch(err => {
+          expect(err.message).toMatch(/Missing required parameters/);
+          done();
+        });
+      });
+    });
+  });
 });

test/unit/authorization.test.js

Lines changed: 8 additions & 8 deletions
@@ -3,7 +3,7 @@
 const Authorization = require('../../dist/authorization/v1');
 const { BasicAuthenticator, IamAuthenticator } = require('ibm-cloud-sdk-core');
 
-describe('authorization', function () {
+describe('authorization', function() {
   const url = 'http://ibm.com:80/text-to-speech-beta/api/foo/bar';
   const service = {
     url,
@@ -30,38 +30,38 @@ describe('authorization', function () {
   createRequestMock.mockImplementation(params => Promise.resolve(mockToken));
 
   function missingParameter(done) {
-    return function (err) {
+    return function(err) {
       expect(err).toBeInstanceOf(Error);
       expect(/required parameters/.test(err)).toBe(true);
       done();
     };
   }
 
   function checkToken(done) {
-    return function (err, res) {
+    return function(err, res) {
       expect(err).toBeNull();
       expect(res).toBe(mockToken);
       done();
     };
   }
 
-  describe('getToken()', function () {
-    it('should check for missing url param', function (done) {
+  describe('getToken()', function() {
+    it('should check for missing url param', function(done) {
       const params = {
         noturl: url,
       };
       authorization.getToken(params, missingParameter(done));
     });
 
-    it('should generate a valid token payload', function (done) {
+    it('should generate a valid token payload', function(done) {
       authorization.getToken({ url: 'http://ibm.com/myservice/myresource' }, checkToken(done));
     });
 
-    it('should default to url from credentials', function (done) {
+    it('should default to url from credentials', function(done) {
       authorization.getToken(checkToken(done));
     });
 
-    it('should return an iam access token if given iam_api_key', function (done) {
+    it('should return an iam access token if given iam_api_key', function(done) {
       expect(authorizationIam.tokenManager).not.toBeNull();
 
       // mock the token manager request method

test/unit/visual-recognition.v4.test.js

Lines changed: 85 additions & 0 deletions
@@ -468,6 +468,91 @@ describe('VisualRecognitionV4', () => {
       });
     });
   });
+  describe('getModelFile', () => {
+    describe('positive tests', () => {
+      test('should pass the right params to createRequest', () => {
+        // parameters
+        const collectionId = 'fake_collectionId';
+        const feature = 'fake_feature';
+        const modelFormat = 'fake_modelFormat';
+        const params = {
+          collectionId,
+          feature,
+          modelFormat,
+        };
+
+        const getModelFileResult = visualRecognition.getModelFile(params);
+
+        // all methods should return a Promise
+        expectToBePromise(getModelFileResult);
+
+        // assert that create request was called
+        expect(createRequestMock).toHaveBeenCalledTimes(1);
+
+        const options = getOptions(createRequestMock);
+
+        checkUrlAndMethod(options, '/v4/collections/{collection_id}/model', 'GET');
+        const expectedAccept = 'application/octet-stream';
+        const expectedContentType = undefined;
+        checkMediaHeaders(createRequestMock, expectedAccept, expectedContentType);
+        expect(options.qs['feature']).toEqual(feature);
+        expect(options.qs['model_format']).toEqual(modelFormat);
+        expect(options.path['collection_id']).toEqual(collectionId);
+        expect(options.responseType).toBe('stream');
+      });
+
+      test('should prioritize user-given headers', () => {
+        // parameters
+        const collectionId = 'fake_collectionId';
+        const feature = 'fake_feature';
+        const modelFormat = 'fake_modelFormat';
+        const userAccept = 'fake/header';
+        const userContentType = 'fake/header';
+        const params = {
+          collectionId,
+          feature,
+          modelFormat,
+          headers: {
+            Accept: userAccept,
+            'Content-Type': userContentType,
+          },
+        };
+
+        visualRecognition.getModelFile(params);
+        checkMediaHeaders(createRequestMock, userAccept, userContentType);
+      });
+    });
+
+    describe('negative tests', () => {
+      test('should enforce required parameters', async done => {
+        // required parameters for this method
+        const requiredParams = ['collectionId', 'feature', 'modelFormat'];
+
+        let err;
+        try {
+          await visualRecognition.getModelFile({});
+        } catch (e) {
+          err = e;
+        }
+
+        expect(err.message).toMatch(/Missing required parameters/);
+        done();
+      });
+
+      test('should reject promise when required params are not given', done => {
+        // required parameters for this method
+        const requiredParams = ['collectionId', 'feature', 'modelFormat'];
+
+        const getModelFilePromise = visualRecognition.getModelFile();
+        expectToBePromise(getModelFilePromise);
+
+        getModelFilePromise.catch(err => {
+          expect(err.message).toMatch(/Missing required parameters/);
+          done();
+        });
+      });
+    });
+  });
   describe('addImages', () => {
     describe('positive tests', () => {
       test('should pass the right params to createRequest', () => {
