@@ -1,5 +1,7 @@
 package io.frictionlessdata.tableschema;

+import com.fasterxml.jackson.core.JsonProcessingException;
+import com.fasterxml.jackson.databind.ObjectMapper;
 import io.frictionlessdata.tableschema.exception.*;
 import io.frictionlessdata.tableschema.field.Field;
 import io.frictionlessdata.tableschema.iterator.BeanIterator;
@@ -411,7 +413,7 @@ public List<Object[]> read(){
     /**
      * Read all data from the Table and return it as JSON. If no Schema is set on the table, one will be inferred.
      * This can be used for smaller data tables but for huge or unknown sizes, there will be performance considerations,
-     * as this method loads all data into RAM *and* does a costly schema inferral.
+     * as this method loads all data into RAM *and* does a costly schema inference.
      *
      * It ignores relations to other data sources.
      *
@@ -434,33 +436,57 @@ public String asJson() {
             rows.add(obj);
         });

-        return JsonUtil.getInstance().serialize(rows);
+        String retVal = null;
+        ObjectMapper mapper = JsonUtil.getInstance().getMapper();
+        try {
+            retVal = mapper.writerWithDefaultPrettyPrinter().writeValueAsString(rows);
+        } catch (JsonProcessingException ex) {
+            throw new JsonSerializingException(ex);
+        }
+        return retVal;
     }
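
A minimal usage sketch for the new asJson() output. It assumes an already constructed Table instance named `table`; how that instance is built is outside this diff and depends on the data source.

// Sketch only: `table` is assumed to be an existing io.frictionlessdata.tableschema.Table.
String json = table.asJson();   // pretty-printed via writerWithDefaultPrettyPrinter()
System.out.println(json);       // typically a JSON array with one object per row
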

     /**
-     * Write as CSV file, the `format` parameter decides on the CSV options. If it is
-     * null, then the file will be written as RFC 4180 compliant CSV
-     * @param out the Writer to write to
+     * Read all data from the Table and return it as an RFC 4180 compliant CSV string.
+     * Column order will be deduced from the table data source.
+     *
+     * @return A CSV representation of the data as a String.
+     */
+    public String asCsv() {
+        return asCsv(null, null);
+    }
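
A hedged usage sketch for the no-argument overload: it delegates to asCsv(null, null), so the result is RFC 4180 CSV with the source column order. `table` is again an assumed, pre-built Table instance.

// Sketch only: default CSV export; `table` is an assumed existing Table instance.
String csv = table.asCsv();          // RFC 4180 format, columns in data-source order
System.out.println(csv);             // no trailing line break (trimmed by asCsv)
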
+
+    /**
+     * Return the data as a CSV string:
+     *
+     * - the `format` parameter decides on the CSV options. If it is null, the output
+     *   will be RFC 4180 compliant CSV
+     * - the `headerNames` parameter decides on the order of the headers in the CSV output. If it is null,
+     *   the order of the columns will be the same as in the data source.
+     *
+     * It ignores relations to other data sources.
+     *
      * @param format the CSV format to use
-     * @param sortedHeaders the header row names in the order in which data should be
-     *                      exported
+     * @param headerNames the header row names in the order in which data should be exported
+     *
+     * @return A CSV representation of the data as a String.
      */
-    private void writeCsv(Writer out, CSVFormat format, String[] sortedHeaders) {
+    public String asCsv(CSVFormat format, String[] headerNames) {
+        StringBuilder out = new StringBuilder();
         try {
-            if (null == sortedHeaders) {
-                writeCsv(out, format, getHeaders());
-                return;
+            if (null == headerNames) {
+                return asCsv(format, getHeaders());
             }
             CSVFormat locFormat = (null != format)
                     ? format
                     : TableDataSource.getDefaultCsvFormat();

-            locFormat = locFormat.builder().setHeader(sortedHeaders).get();
+            locFormat = locFormat.builder().setHeader(headerNames).get();
             CSVPrinter csvPrinter = new CSVPrinter(out, locFormat);

             String[] headers = getHeaders();
             Map<Integer, Integer> mapping
-                    = TableSchemaUtil.createSchemaHeaderMapping(headers, sortedHeaders, dataSource.hasReliableHeaders());
+                    = TableSchemaUtil.createSchemaHeaderMapping(headers, headerNames, dataSource.hasReliableHeaders());
             if ((null != schema)) {
                 writeCSVData(mapping, schema, csvPrinter);
             } else {
@@ -470,6 +496,14 @@ private void writeCsv(Writer out, CSVFormat format, String[] sortedHeaders) {
         } catch (IOException ex) {
             throw new TableIOException(ex);
         }
+        String result = out.toString();  // CSVPrinter ends the last record with the record separator
+        if (result.endsWith("\n")) {     // strip it so the returned String has no trailing line break
+            result = result.substring(0, result.length() - 1);
+        }
+        if (result.endsWith("\r")) {
+            result = result.substring(0, result.length() - 1);
+        }
+        return result;
     }
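
A sketch of the two-argument overload. The header names ("id", "name", "population") are purely illustrative, `table` is again an assumed existing instance, and the semicolon format is built with the same builder pattern the method itself uses.

// Sketch only: illustrative headers; real tables will have different columns.
CSVFormat semicolon = TableDataSource.getDefaultCsvFormat()
        .builder()
        .setDelimiter(';')
        .get();
String csv = table.asCsv(semicolon, new String[] {"id", "name", "population"});
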

     /**
@@ -715,6 +749,49 @@ public int hashCode() {
     }


+
+    /**
+     * Write as CSV file, the `format` parameter decides on the CSV options. If it is
+     * null, then the file will be written as RFC 4180 compliant CSV
+     * @param out the Writer to write to
+     * @param format the CSV format to use
+     * @param sortedHeaders the header row names in the order in which data should be
+     *                      exported
+     */
+    private void writeCsv(Writer out, CSVFormat format, String[] sortedHeaders) {
+        try {
+            if (null == sortedHeaders) {
+                writeCsv(out, format, getHeaders());
+                return;
+            }
+            CSVFormat locFormat = (null != format)
+                    ? format
+                    : TableDataSource.getDefaultCsvFormat();
+
+            locFormat = locFormat.builder().setHeader(sortedHeaders).get();
+            CSVPrinter csvPrinter = new CSVPrinter(out, locFormat);
+
+            String[] headers = getHeaders();
+            Map<Integer, Integer> mapping
+                    = TableSchemaUtil.createSchemaHeaderMapping(headers, sortedHeaders, dataSource.hasReliableHeaders());
+            if ((null != schema)) {
+                writeCSVData(mapping, schema, csvPrinter);
+            } else {
+                writeCSVData(mapping, csvPrinter);
+            }
+            csvPrinter.close();
+        } catch (IOException ex) {
+            throw new TableIOException(ex);
+        }
+    }
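
The header mapping used above comes from TableSchemaUtil.createSchemaHeaderMapping(); the standalone sketch below only illustrates the general idea of such a mapping (output column index to source column index) and is not that method's actual implementation.

// Illustration only, NOT TableSchemaUtil.createSchemaHeaderMapping(); needs java.util.* imports.
static Map<Integer, Integer> exampleHeaderMapping(String[] sourceHeaders, String[] targetHeaders) {
    Map<Integer, Integer> mapping = new HashMap<>();
    List<String> source = Arrays.asList(sourceHeaders);
    for (int i = 0; i < targetHeaders.length; i++) {
        // assumed convention: key = output column index, value = index of that header in the source
        mapping.put(i, source.indexOf(targetHeaders[i]));   // -1 if the header is not in the source
    }
    return mapping;
}
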
+
+
+    /**
+     * Append the data to a {@link org.apache.commons.csv.CSVPrinter}. Column order follows the mapping.
+     * @param mapping the mapping of the column numbers in the CSV file to the column numbers in the data source
+     * @param schema the Schema to use for formatting the data
+     * @param csvPrinter the CSVPrinter to write to
+     */
     private void writeCSVData(Map<Integer, Integer> mapping, Schema schema, CSVPrinter csvPrinter) {
         Iterator<Object> iter = this.iterator(false, false, true, false);
         iter.forEachRemaining((rec) -> {