diff --git a/README.md b/README.md index 4d59e8d..fb64b2b 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ # Spreadsheet CFML -Standalone library for working with spreadsheets in CFML ([Lucee](http://lucee.org/) and Adobe ColdFusion), supporting all of ColdFusion's native spreadsheet functionality and much more besides. +Standalone library for working with spreadsheets and CSV in CFML ([Lucee](http://lucee.org/) and Adobe ColdFusion), supporting all of ColdFusion's native spreadsheet functionality and much more besides. ## Minimum Requirements diff --git a/Spreadsheet.cfc b/Spreadsheet.cfc index 27b9fc1..7f59d81 100644 --- a/Spreadsheet.cfc +++ b/Spreadsheet.cfc @@ -1,7 +1,7 @@ component accessors="true"{ //"static" - property name="version" default="3.11.1" setter="false"; + property name="version" default="3.11.1-develop" setter="false"; property name="osgiLibBundleVersion" default="5.2.4.1" setter="false"; //first 3 octets = POI version; increment 4th with other jar updates property name="osgiLibBundleSymbolicName" default="spreadsheet-cfml" setter="false"; property name="exceptionType" default="cfsimplicity.spreadsheet" setter="false"; @@ -1716,6 +1716,10 @@ component accessors="true"{ return this; } + public any function writeCsv(){ + return New objects.WriteCsv( this ); + } + public Spreadsheet function writeFileFromQuery( required query data ,required string filepath diff --git a/helpers/csv.cfc b/helpers/csv.cfc index 2a2ab75..2800f79 100644 --- a/helpers/csv.cfc +++ b/helpers/csv.cfc @@ -70,8 +70,7 @@ component extends="base"{ return dataFromParser( parser ); } finally{ - if( local.KeyExists( "parser" ) ) - parser.close(); + getFileHelper().closeLocalFileOrStream( local, "parser" ); } } diff --git a/helpers/query.cfc b/helpers/query.cfc index b198563..a960439 100644 --- a/helpers/query.cfc +++ b/helpers/query.cfc @@ -7,8 +7,8 @@ component extends="base"{ catch( any exception ){ if( !exception.message CONTAINS "undefined" ) rethrow; - // ACF - return arguments.q.getColumnNames(); + // ACF: the raw object can behave oddly with writeCsv().setQueryColumnsAsHeaderIfRequired(), hence re-casting as a CFML array + return ListToArray( ArrayToList( arguments.q.getColumnNames() ) ); } } diff --git a/objects/BaseCsv.cfc b/objects/BaseCsv.cfc new file mode 100644 index 0000000..6d1d233 --- /dev/null +++ b/objects/BaseCsv.cfc @@ -0,0 +1,120 @@ +component accessors="true"{ + + property name="filepath"; + property name="headerValues" type="array"; + /* Java objects */ + property name="format"; //org.apache.commons.csv.CSVFormat + /* Internal */ + property name="library" setter="false"; + + public BaseCsv function init( required spreadsheetLibrary, string initialPredefinedFormat="DEFAULT" ){ + variables.library = arguments.spreadsheetLibrary; + variables.format = createPredefinedFormat( arguments.initialPredefinedFormat ); + return this; + } + + /* Public builder API */ + public BaseCsv function withPredefinedFormat( required string type ){ + variables.format = createPredefinedFormat( arguments.type ); + return this; + } + + /* Format configuration */ + public BaseCsv function withAllowMissingColumnNames( boolean state=true ){ + variables.format = variables.format.builder().setAllowMissingColumnNames( JavaCast( "boolean", arguments.state ) ).build(); + return this; + } + + public BaseCsv function withAutoFlush( boolean state=true ){ + variables.format = variables.format.builder().setAutoFlush( JavaCast( "boolean", arguments.state ) ).build(); + return this; + } + + public BaseCsv function 
withCommentMarker( required string marker ){ + variables.format = variables.format.builder().setCommentMarker( JavaCast( "char", arguments.marker ) ).build(); + return this; + } + + public BaseCsv function withDelimiter( required string delimiter ){ + if( variables.library.getCsvHelper().delimiterIsTab( arguments.delimiter ) ){ + variables.format = createPredefinedFormat( "TDF" ); //tabs require several specific settings so use predefined format + return this; + } + variables.format = variables.format.builder().setDelimiter( JavaCast( "string", arguments.delimiter ) ).build(); + return this; + } + + public BaseCsv function withDuplicateHeaderMode( required string value ){ + var mode = variables.library.createJavaObject( "org.apache.commons.csv.DuplicateHeaderMode" )[ JavaCast( "string", arguments.value ) ]; + variables.format = variables.format.builder().setDuplicateHeaderMode( mode ).build(); + return this; + } + + public BaseCsv function withEscapeCharacter( required string character ){ + variables.format = variables.format.builder().setEscape( JavaCast( "char", arguments.character ) ).build(); + return this; + } + + public BaseCsv function withHeader( required array header ){ + variables.headerValues = arguments.header; + variables.format = variables.format.builder().setHeader( JavaCast( "string[]", arguments.header ) ).build(); + return this; + } + + public BaseCsv function withHeaderComments( required array comments ){ + variables.format = variables.format.builder().setHeaderComments( JavaCast( "string[]", arguments.comments ) ).build(); + return this; + } + + public BaseCsv function withIgnoreEmptyLines( boolean state=true ){ + variables.format = variables.format.builder().setIgnoreEmptyLines( JavaCast( "boolean", arguments.state ) ).build(); + return this; + } + + public BaseCsv function withIgnoreHeaderCase( boolean state=true ){ + variables.format = variables.format.builder().setIgnoreHeaderCase( JavaCast( "boolean", arguments.state ) ).build(); + return this; + } + + public BaseCsv function withIgnoreSurroundingSpaces( boolean state=true ){ + variables.format = variables.format.builder().setIgnoreSurroundingSpaces( JavaCast( "boolean", arguments.state ) ).build(); + return this; + } + + public BaseCsv function withNullString( required string value ){ + variables.format = variables.format.builder().setNullString( JavaCast( "string", arguments.value ) ).build(); + return this; + } + + public BaseCsv function withQuoteCharacter( string character ){ + variables.format = variables.format.builder().setQuote( JavaCast( "char", arguments.character ) ).build(); + return this; + } + + public BaseCsv function withQuoteMode( required string value ){ + var mode = variables.library.createJavaObject( "org.apache.commons.csv.QuoteMode" )[ JavaCast( "string", arguments.value ) ]; + variables.format = variables.format.builder().setQuoteMode( mode ).build(); + return this; + } + + public BaseCsv function withSkipHeaderRecord( boolean state=true ){ + variables.format = variables.format.builder().setSkipHeaderRecord( JavaCast( "boolean", arguments.state ) ).build(); + return this; + } + + public BaseCsv function withTrailingDelimiter( boolean state=true ){ + variables.format = variables.format.builder().setTrailingDelimiter( JavaCast( "boolean", arguments.state ) ).build(); + return this; + } + + public BaseCsv function withTrim( boolean state=true ){ + variables.format = variables.format.builder().setTrim( JavaCast( "boolean", arguments.state ) ).build(); + return this; + } + + //Private + private 
any function createPredefinedFormat( string type="DEFAULT" ){ + return variables.library.createJavaObject( "org.apache.commons.csv.CSVFormat" )[ JavaCast( "string", arguments.type ) ]; + } + +} \ No newline at end of file diff --git a/objects/ReadCsv.cfc b/objects/ReadCsv.cfc index 03f1669..42652e0 100644 --- a/objects/ReadCsv.cfc +++ b/objects/ReadCsv.cfc @@ -1,24 +1,17 @@ -component accessors="true"{ +component extends="BaseCsv" accessors="true"{ - property name="filepath"; - property name="firstRowIsHeader" type="boolean" default=false; - property name="headerValues" type="array"; + property name="firstRowIsHeader" type="boolean" default="false"; property name="numberOfRowsToSkip" default=0; property name="returnFormat" default="none"; property name="rowFilter"; property name="rowProcessor"; - /* Java objects */ - property name="format"; //org.apache.commons.csv.CSVFormat - /* Internal */ - property name="library" setter="false"; public ReadCsv function init( required spreadsheetLibrary, required string filepath ){ - variables.library = arguments.spreadsheetLibrary; + super.init( arguments.spreadsheetLibrary ); variables.library.getFileHelper() .throwErrorIFfileNotExists( arguments.filepath ) .throwErrorIFnotCsvOrTextFile( arguments.filepath ); variables.filepath = arguments.filepath; - variables.format = createPredefinedFormat(); return this; } @@ -29,99 +22,6 @@ component accessors="true"{ return this; } - public ReadCsv function withPredefinedFormat( required string type ){ - variables.format = createPredefinedFormat( arguments.type ); - return this; - } - - /* Format configuration */ - public ReadCsv function withAllowMissingColumnNames( boolean state=true ){ - variables.format = variables.format.builder().setAllowMissingColumnNames( JavaCast( "boolean", arguments.state ) ).build(); - return this; - } - - public ReadCsv function withAutoFlush( boolean state=true ){ - variables.format = variables.format.builder().setAutoFlush( JavaCast( "boolean", arguments.state ) ).build(); - return this; - } - - public ReadCsv function withCommentMarker( required string marker ){ - variables.format = variables.format.builder().setCommentMarker( JavaCast( "char", arguments.marker ) ).build(); - return this; - } - - public ReadCsv function withDelimiter( required string delimiter ){ - if( variables.library.getCsvHelper().delimiterIsTab( arguments.delimiter ) ){ - variables.format = createPredefinedFormat( "TDF" ); //tabs require several specific settings so use predefined format - return this; - } - variables.format = variables.format.builder().setDelimiter( JavaCast( "string", arguments.delimiter ) ).build(); - return this; - } - - public ReadCsv function withDuplicateHeaderMode( required string value ){ - var mode = variables.library.createJavaObject( "org.apache.commons.csv.DuplicateHeaderMode" )[ JavaCast( "string", arguments.value ) ]; - variables.format = variables.format.builder().setDuplicateHeaderMode( mode ).build(); - return this; - } - - public ReadCsv function withEscapeCharacter( required string character ){ - variables.format = variables.format.builder().setEscape( JavaCast( "char", arguments.character ) ).build(); - return this; - } - - public ReadCsv function withHeader( required array header ){ - variables.headerValues = arguments.header; - variables.format = variables.format.builder().setHeader( JavaCast( "string[]", arguments.header ) ).build(); - return this; - } - - public ReadCsv function withHeaderComments( required array comments ){ - variables.format = 
variables.format.builder().setHeaderComments( JavaCast( "string[]", arguments.comments ) ).build(); - return this; - } - - public ReadCsv function withIgnoreEmptyLines( boolean state=true ){ - variables.format = variables.format.builder().setIgnoreEmptyLines( JavaCast( "boolean", arguments.state ) ).build(); - return this; - } - - public ReadCsv function withIgnoreHeaderCase( boolean state=true ){ - variables.format = variables.format.builder().setIgnoreHeaderCase( JavaCast( "boolean", arguments.state ) ).build(); - return this; - } - - public ReadCsv function withIgnoreSurroundingSpaces( boolean state=true ){ - variables.format = variables.format.builder().setIgnoreSurroundingSpaces( JavaCast( "boolean", arguments.state ) ).build(); - return this; - } - - public ReadCsv function withNullString( required string value ){ - variables.format = variables.format.builder().setNullString( JavaCast( "string", arguments.value ) ).build(); - return this; - } - - public ReadCsv function withQuoteCharacter( string character ){ - variables.format = variables.format.builder().setQuote( JavaCast( "char", arguments.character ) ).build(); - return this; - } - - public ReadCsv function withSkipHeaderRecord( boolean state=true ){ - variables.format = variables.format.builder().setSkipHeaderRecord( JavaCast( "boolean", arguments.state ) ).build(); - return this; - } - - public ReadCsv function withTrailingDelimiter( boolean state=true ){ - variables.format = variables.format.builder().setTrailingDelimiter( JavaCast( "boolean", arguments.state ) ).build(); - return this; - } - - public ReadCsv function withTrim( boolean state=true ){ - variables.format = variables.format.builder().setTrim( JavaCast( "boolean", arguments.state ) ).build(); - return this; - } - - // additional features public ReadCsv function withFirstRowIsHeader( boolean state=true ){ variables.firstRowIsHeader = arguments.state; return this; @@ -178,8 +78,7 @@ component accessors="true"{ } } finally { - if( local.KeyExists( "parser" ) ) - parser.close(); + variables.library.getFileHelper().closeLocalFileOrStream( local, "parser" ); } if( variables.returnFormat == "array" ){ useManuallySpecifiedHeaderForColumnsIfRequired( result ); @@ -199,8 +98,4 @@ component accessors="true"{ return variables.numberOfRowsToSkip && ( arguments.skippedRecords < variables.numberOfRowsToSkip ); } - private any function createPredefinedFormat( string type="DEFAULT" ){ - return variables.library.createJavaObject( "org.apache.commons.csv.CSVFormat" )[ JavaCast( "string", arguments.type ) ]; - } - } \ No newline at end of file diff --git a/objects/WriteCsv.cfc b/objects/WriteCsv.cfc new file mode 100644 index 0000000..1ec1678 --- /dev/null +++ b/objects/WriteCsv.cfc @@ -0,0 +1,199 @@ +component extends="BaseCsv" accessors="true"{ + + property name="data" setter="false"; + property name="parallelThreadsToUse" type="numeric" default=1 setter="false"; + property name="useQueryColumnsAsHeader" type="boolean" default="false" setter="false"; + property name="useStructKeysAsHeader" type="boolean" default="false" setter="false"; + + public WriteCsv function init( required spreadsheetLibrary ){ + super.init( spreadsheetLibrary=arguments.spreadsheetLibrary, initialPredefinedFormat="EXCEL" ); + return this; + } + + /* Public builder API */ + + public WriteCsv function fromData( required any data ){ + if( !IsArray( arguments.data ) && !IsQuery( arguments.data ) ) + Throw( type=variables.library.getExceptionType() & ".invalidDataForCsv", message="Invalid data", detail="Please 
pass your data as a query, an array of arrays, or an array of structs" ); + variables.data = arguments.data; + return this; + } + + public WriteCsv function toFile( required string path ){ + variables.filepath = arguments.path; + return this; + } + + public WriteCsv function withParallelThreads( numeric numberOfThreads=2 ){ + /* WARNING: can have unexpected results */ + if( !variables.library.engineSupportsParallelLoopProcessing() ) + variables.library.getExceptionHelper().throwParallelOptionNotSupportedException(); + variables.parallelThreadsToUse = Int( arguments.numberOfThreads ); + return this; + } + + public WriteCsv function withQueryColumnsAsHeader( boolean state=true ){ + variables.useQueryColumnsAsHeader = arguments.state; + return this; + } + + public WriteCsv function withStructKeysAsHeader( boolean state=true ){ + variables.useStructKeysAsHeader = arguments.state; + return this; + } + + // final execution + public any function execute(){ + var appendable = newAppendableBuffer(); + printTo( appendable ); + if( IsNull( variables.filepath ) ) + return appendable.toString(); + return this; + } + + /* Private */ + private void function printTo( required appendable ){ + try{ + if( IsQuery( data ) ){ + setQueryColumnsAsHeaderIfRequired(); + var printer = newPrinter( arguments.appendable ); + printFromQuery( printer ); + return; + } + setStructKeysAsHeaderIfRequired(); + var printer = newPrinter( arguments.appendable ); + printFromArray( printer ); + } + finally{ + if( local.KeyExists( "printer" ) ) + printer.close( JavaCast( "boolean", true ) ); + } + } + + private void function setQueryColumnsAsHeaderIfRequired(){ + if( !variables.useQueryColumnsAsHeader ) + return; + var columns = variables.library.getQueryHelper()._QueryColumnArray( variables.data ); + super.withHeader( columns ); + } + + private void function setStructKeysAsHeaderIfRequired(){ + if( !variables.useStructKeysAsHeader || !variables.data.Len() || !IsStruct( variables.data[ 1 ] ) ) + return; + var keys = variables.data[ 1 ].KeyArray(); + super.withHeader( keys ); + } + + private any function newPrinter( required appendable ){ + return variables.library.createJavaObject( "org.apache.commons.csv.CSVPrinter" ).init( arguments.appendable, variables.format ); + } + + private any function newAppendableBuffer(){ + if( IsNull( variables.filepath ) ) + return variables.library.getStringHelper().newJavaStringBuilder(); + return newBufferedFileWriter(); + } + + private any function newBufferedFileWriter(){ + var path = CreateObject( "java", "java.nio.file.Paths" ).get( JavaCast( "string", variables.filepath ), [] ); + var charset = CreateObject( "java", "java.nio.charset.Charset" ).forName( "UTF-8" ); + return CreateObject( "java", "java.nio.file.Files" ).newBufferedWriter( path, charset, [] ); + } + + private void function printFromArray( required printer ){ + if( useParallelThreads() ){ + var printRowFunction = function( row ){ + printRowFromArray( row, printer );//don't scope + }; + printUsingParallelThreads( printRowFunction ); + return; + } + for( var row in variables.data ){ + printRowFromArray( row, arguments.printer ); + } + } + + private void function printFromQuery( required printer ){ + var columns = variables.library.getQueryHelper()._QueryColumnArray( variables.data ); + if( useParallelThreads() ){ + var printRowFunction = function( row ){ + printRowFromQuery( row, columns, printer );//don't scope + }; + printUsingParallelThreads( printRowFunction ); + return; + } + for( var row in variables.data ){ + 
printRowFromQuery( row, columns, arguments.printer ); + } + } + + private void function printUsingParallelThreads( required function printRowFunction ){ + variables.data.Each( + arguments.printRowFunction + ,true + ,variables.parallelThreadsToUse + ); + } + + private function printRowFromArray( required row, required printer ){ + if( IsStruct( arguments.row ) ) + arguments.row = _StructValueArray( arguments.row ); + arguments.row = checkArrayRow( arguments.row ); + printRow( arguments.printer, arguments.row ); + } + + private function printRowFromQuery( required row, required columns, required printer ){ + arguments.row = convertQueryRowToArray( arguments.row, arguments.columns ); + printRow( arguments.printer, arguments.row ); + } + + private void function printRow( required printer, required array row ){ + arguments.printer.printRecord( JavaCast( "string[]", row ) );//force numbers to strings to avoid 0.0 formatting + } + + private array function checkArrayRow( required array row ){ + var totalColumns = arguments.row.Len(); + cfloop( from=1, to=totalColumns, index="i" ){ + var value = arguments.row[ i ]; + if( !IsSimpleValue( value ) ) + Throw( type=variables.library.getExceptionType() & ".invalidDataForCsv", message="Invalid data", detail="Your data contains complex values which cannot be output to CSV" ); + arguments.row[ i ] = formatDateString( value ); + } + return arguments.row; + } + + private string function formatDateString( required string value ){ + if( !variables.library.getDateHelper().isDateObject( arguments.value ) ) + return arguments.value; + return DateTimeFormat( arguments.value, variables.library.getDateFormats().DATETIME ); + } + + private array function convertQueryRowToArray( required struct row, required array columns ){ + var result = []; + for( var column IN arguments.columns ){ + var cellValue = formatDateString( arguments.row[ column ] ); + result.Append( cellValue ); + }; + return result; + } + + private boolean function useParallelThreads(){ + return variables.library.engineSupportsParallelLoopProcessing() && ( variables.parallelThreadsToUse > 1 ); + } + + private array function _StructValueArray( required struct data ){ + try{ + return StructValueArray( arguments.data ); // Lucee 5.3.8.117+ + } + catch( any exception ){ + if( !exception.message.REFindNoCase( "undefined|no matching function" ) ) + rethrow; + var result = []; + for( var key in arguments.data ){ + result.Append( arguments.data[ key ] ); + } + return result; + } + } + +} \ No newline at end of file diff --git a/test/specs/cellValue.cfm b/test/specs/cellValue.cfm index a7c957e..02c57e0 100644 --- a/test/specs/cellValue.cfm +++ b/test/specs/cellValue.cfm @@ -6,7 +6,7 @@ describe( "cellValue", function(){ }); it( "Gets the value from the specified cell", function(){ - var data = QueryNew( "column1,column2", "VarChar,VarChar", [ [ "a","b" ], [ "c","d" ] ] ); + var data = [ [ "a", "b" ], [ "c", "d" ] ]; workbooks.Each( function( wb ){ s.addRows( wb, data ); expect( s.getCellValue( wb, 2, 2 ) ).toBe( "d" ); diff --git a/test/specs/chaining.cfm b/test/specs/chaining.cfm index 00e671e..6714bc8 100644 --- a/test/specs/chaining.cfm +++ b/test/specs/chaining.cfm @@ -42,7 +42,7 @@ describe( "chaining", function(){ }); it( "Allows the workbook to be generated from a CSV file", function(){ - var csv = 'column1,column2#crlf#"Frumpo McNugget",12345'; + var csv = 'column1,column2#newline#"Frumpo McNugget",12345'; wb = s.newChainable().fromCsv( csv=csv, firstRowIsHeader=true ).getWorkbook(); expect( 
s.getCellValue( wb, 2, 2 ) ).toBe( "12345" ); }); diff --git a/test/specs/csvToQuery.cfm b/test/specs/csvToQuery.cfm index b062fc2..bc29100 100644 --- a/test/specs/csvToQuery.cfm +++ b/test/specs/csvToQuery.cfm @@ -67,21 +67,21 @@ describe( "csvToQuery", function(){ }); it( "can handle empty cells", function(){ - var csv = 'Frumpo,McNugget#crlf#Susi#crlf#Susi,#crlf#,Sorglos#crlf# '; + var csv = 'Frumpo,McNugget#newline#Susi#newline#Susi,#newline#,Sorglos#newline# '; var expected = QueryNew( "column1,column2", "", [ [ "Frumpo", "McNugget" ], [ "Susi", "" ], [ "Susi", "" ], [ "", "Sorglos" ] ] ); var actual = s.csvToQuery( csv ); expect( actual ).toBe( expected ); }); it( "can treat the first line as the column names", function(){ - var csv = 'Name,Phone#crlf#Frumpo,12345'; + var csv = 'Name,Phone#newline#Frumpo,12345'; var expected = QueryNew( "Name,Phone", "", [ [ "Frumpo", "12345" ] ] ); var actual = s.csvToQuery( csv=csv, firstRowIsHeader=true ); expect( actual ).toBe( expected ); }); it( "can handle spaces in header/column names", function(){ - var csv = 'Name,Phone Number#crlf#Frumpo,12345'; + var csv = 'Name,Phone Number#newline#Frumpo,12345'; if( s.getIsACF() ){ //ACF won't allow spaces in column names when creating queries programmatically. Use setColumnNames() to override: var expected = QueryNew( "column1,column2", "", [ [ "Frumpo", "12345" ] ] ); @@ -94,11 +94,11 @@ describe( "csvToQuery", function(){ }); it( "will preserve the case of header/column names", function(){ - var csv = 'Name,Phone#crlf#Frumpo McNugget,12345'; + var csv = 'Name,Phone#newline#Frumpo McNugget,12345'; var actual = s.csvToQuery( csv=csv, firstRowIsHeader=true ); expect( actual.getColumnNames()[ 1 ] ).toBeWithCase( "Name" ); //invalid variable name - csv = '1st Name,Phone#crlf#Frumpo McNugget,12345'; + csv = '1st Name,Phone#newline#Frumpo McNugget,12345'; actual = s.csvToQuery( csv=csv, firstRowIsHeader=true ); expect( actual.getColumnNames()[ 1 ] ).toBeWithCase( "1st Name" ); }); @@ -106,27 +106,27 @@ describe( "csvToQuery", function(){ describe( "trimming", function(){ it( "will trim the csv string by default", function(){ - var csv = crlf & '"Frumpo McNugget",12345' & crlf; + var csv = newline & '"Frumpo McNugget",12345' & newline; var actual = s.csvToQuery( csv ); expect( actual ).toBe( basicExpectedQuery ); }); it( "will trim the csv file by default", function(){ - var csv = crlf & '"Frumpo McNugget",12345' & crlf; + var csv = newline & '"Frumpo McNugget",12345' & newline; FileWrite( tempCsvPath, csv ); var actual = s.csvToQuery( filepath: tempCsvPath ); expect( actual ).toBe( basicExpectedQuery ); }); it( "can preserve a string's leading/trailing space", function(){ - var csv = crlf & '"Frumpo McNugget",12345' & crlf; + var csv = newline & '"Frumpo McNugget",12345' & newline; var actual = s.csvToQuery( csv: csv, trim: false ); expected = QueryNew( "column1,column2", "", [ [ "", "" ], [ "Frumpo McNugget", "12345" ] ] ); expect( actual ).toBe( expected ); }); it( "can preserve a file's leading/trailing space", function(){ - var csv = crlf & '"Frumpo McNugget",12345' & crlf; + var csv = newline & '"Frumpo McNugget",12345' & newline; FileWrite( tempCsvPath, csv ); var actual = s.csvToQuery( filepath: tempCsvPath, trim: false ); expected = QueryNew( "column1,column2", "", [ [ "", "" ], [ "Frumpo McNugget", "12345" ] ] ); @@ -174,7 +174,7 @@ describe( "csvToQuery", function(){ }); it( "ColumnNames argument overrides firstRowIsHeader: none of the header row values will be used", function(){ - var csv = 
'header1,header2#crlf#"Frumpo McNugget",12345'; + var csv = 'header1,header2#newline#"Frumpo McNugget",12345'; var columnNames = [ "name", "phone number" ]; var q = s.csvToQuery( csv=csv, queryColumnNames=columnNames ); expect( q.getColumnNames()[ 1 ] ).toBe( columnNames[ 1 ] ); @@ -182,7 +182,7 @@ describe( "csvToQuery", function(){ }); it( "Allows csv header names to be made safe for query column names", function(){ - var csv = 'id,id,"A B","x/?y","(a)"," A","##1","1a"#crlf#1,2,3,4,5,6,7,8'; + var csv = 'id,id,"A B","x/?y","(a)"," A","##1","1a"#newline#1,2,3,4,5,6,7,8'; var q = s.csvToQuery( csv=csv, firstRowIsHeader=true, makeColumnNamesSafe=true ); expect( q.getColumnNames() ).toBe( [ "id", "id2", "A_B", "x_y", "_a_", "A", "Number1", "_a" ] ); }); @@ -202,7 +202,7 @@ describe( "csvToQuery", function(){ }); it( "allows the query column types to be manually set where the column order isn't known, but the header row values are", function(){ - var csv = 'integer,double,"string column",time#crlf#1,1.1,string,12:00'; + var csv = 'integer,double,"string column",time#newline#1,1.1,string,12:00'; var columnTypes = { "string column": "VARCHAR", "integer": "INTEGER", "time": "TIME", "double": "DOUBLE" };//not in order var q = s.csvToQuery( csv=csv, queryColumnTypes="Integer,Double,VarChar,Time", firstRowIsHeader=true ); var columns = GetMetaData( q ); @@ -235,7 +235,7 @@ describe( "csvToQuery", function(){ }); it( "automatic detecting of query column types ignores blank cells", function(){ - var csv = ',,,#crlf#,2,test,2021-03-10 12:00:00#crlf#1,1.1,string,2021-03-10 12:00:00#crlf#1,,,'; + var csv = ',,,#newline#,2,test,2021-03-10 12:00:00#newline#1,1.1,string,2021-03-10 12:00:00#newline#1,,,'; var q = s.csvToQuery( csv=csv, queryColumnTypes="auto" ); var columns = GetMetaData( q ); expect( columns[ 1 ].typeName ).toBe( "DOUBLE" ); diff --git a/test/specs/dateFormats.cfm b/test/specs/dateFormats.cfm index 8245224..417a5a8 100644 --- a/test/specs/dateFormats.cfm +++ b/test/specs/dateFormats.cfm @@ -90,7 +90,7 @@ describe( "dateFormats customisability",function(){ var actual = s.read( src=path, format="html" ); var expected = "ab104/01/2015 12:0:004/01/2015 1:1:12"; expect( actual ).toBe( expected ); - expected = 'a,b#crlf#1,04/01/2015 12:0:0#crlf#04/01/2015 1:1:1,2'; + expected = 'a,b#newline#1,04/01/2015 12:0:0#newline#04/01/2015 1:1:1,2'; actual = s.read( src=path, format="csv" ); expect( actual ).toBe( expected ); }); diff --git a/test/specs/info.cfm b/test/specs/info.cfm index a59b7cf..fd4ce6f 100644 --- a/test/specs/info.cfm +++ b/test/specs/info.cfm @@ -29,7 +29,8 @@ describe( "info", function(){ it( "Adds and can get back info", function(){ workbooks.Each( function( wb ){ s.addInfo( wb, infoToAdd ); - if( s.isXmlFormat( wb ) ) infoToBeReturned.spreadSheetType = "Excel (2007)"; + if( s.isXmlFormat( wb ) ) + infoToBeReturned.spreadSheetType = "Excel (2007)"; var expected = infoToBeReturned; var actual = s.info( wb ); actual.creationDate = DateFormat( Now(), "yyyymmdd" );// Doesn't return this value so mock @@ -51,7 +52,8 @@ describe( "info", function(){ it( "Can accept a file path instead of a workbook", function(){ workbooks.Each( function( wb ){ - if( s.isXmlFormat( wb ) ) infoToBeReturned.spreadSheetType = "Excel (2007)"; + if( s.isXmlFormat( wb ) ) + infoToBeReturned.spreadSheetType = "Excel (2007)"; var tempPath = s.isXmlFormat( wb )? 
tempXlsxPath: tempXlsPath; s.addInfo( wb, infoToAdd ) .write( wb, tempPath, true ); @@ -67,7 +69,8 @@ describe( "info", function(){ var actual = s.newChainable( wb ) .addInfo( infoToAdd ) .info(); - if( s.isXmlFormat( wb ) ) infoToBeReturned.spreadSheetType = "Excel (2007)"; + if( s.isXmlFormat( wb ) ) + infoToBeReturned.spreadSheetType = "Excel (2007)"; var expected = infoToBeReturned; actual.creationDate = DateFormat( Now(), "yyyymmdd" );// Doesn't return this value so mock expect( actual ).toBe( expected ); @@ -75,8 +78,10 @@ describe( "info", function(){ }); afterEach( function(){ - if( FileExists( variables.tempXlsPath ) ) FileDelete( variables.tempXlsPath ); - if( FileExists( variables.tempXlsxPath ) ) FileDelete( variables.tempXlsxPath ); + if( FileExists( variables.tempXlsPath ) ) + FileDelete( variables.tempXlsPath ); + if( FileExists( variables.tempXlsxPath ) ) + FileDelete( variables.tempXlsxPath ); }); }); diff --git a/test/specs/queryToCsv.cfm b/test/specs/queryToCsv.cfm index 1f9afaf..8c2a51e 100644 --- a/test/specs/queryToCsv.cfm +++ b/test/specs/queryToCsv.cfm @@ -6,22 +6,22 @@ describe( "queryToCsv", function(){ }); it( "converts a basic query to csv without a header row by default", function(){ - var expected = 'a,b#crlf#c,d'; + var expected = 'a,b#newline#c,d'; expect( s.queryToCsv( data ) ).toBe( expected ); }); it( "uses the query columns as the header row if specified", function(){ - var expected = 'column1,column2#crlf#a,b#crlf#c,d'; + var expected = 'column1,column2#newline#a,b#newline#c,d'; expect( s.queryToCsv( data, true ) ).toBe( expected ); }); it( "allows an alternative to the default comma delimiter", function(){ - var expected = 'a|b#crlf#c|d'; + var expected = 'a|b#newline#c|d'; expect( s.queryToCsv( query=data, delimiter="|" ) ).toBe( expected ); }); it( "allows tabs to be specified as the delimiter in a number of ways", function(){ - var expected = 'a#Chr( 9 )#b#crlf#c#Chr( 9 )#d'; + var expected = 'a#Chr( 9 )#b#newline#c#Chr( 9 )#d'; var validTabValues = [ "#Chr( 9 )#", "\t", "tab", "TAB" ]; for( var value in validTabValues ){ expect( s.queryToCsv( query=data, delimiter=value ) ).toBe( expected ); @@ -30,19 +30,19 @@ describe( "queryToCsv", function(){ it( "can handle an embedded delimiter", function(){ var data = QueryNew( "column1,column2", "VarChar,VarChar", [ [ "a,a", "b" ], [ "c", "d" ] ] ); - var expected = '"a,a",b#crlf#c,d'; + var expected = '"a,a",b#newline#c,d'; expect( s.queryToCsv( data ) ).toBe( expected ); }); it( "can handle an embedded double-quote", function(){ var data = QueryNew( "column1,column2", "VarChar,VarChar", [ [ "a""a", "b" ], [ "c", "d" ] ] ); - var expected = '"a""a",b#crlf#c,d'; + var expected = '"a""a",b#newline#c,d'; expect( s.queryToCsv( data ) ).toBe( expected ); }); it( "can handle an embedded carriage return", function(){ - var data = QueryNew( "column1,column2", "VarChar,VarChar", [ [ "a#crlf#a", "b" ], [ "c", "d" ] ] ); - var expected = '"a#crlf#a",b#crlf#c,d'; + var data = QueryNew( "column1,column2", "VarChar,VarChar", [ [ "a#newline#a", "b" ], [ "c", "d" ] ] ); + var expected = '"a#newline#a",b#newline#c,d'; expect( s.queryToCsv( data ) ).toBe( expected ); }); @@ -65,7 +65,7 @@ describe( "queryToCsv", function(){ ,body=function(){ //can't test if using threads, just that there are no errors var data = QueryNew( "column1,column2", "VarChar,VarChar", [ [ "a", "a" ], [ "a", "a" ] ] ); - var expected = 'a,a#crlf#a,a';//same values because order is not guaranteed + var expected = 'a,a#newline#a,a';//same values 
because order is not guaranteed expect( s.queryToCsv( query=data, threads=2 ) ).toBe( expected ); } ,skip=function(){ diff --git a/test/specs/read.cfm b/test/specs/read.cfm index b955e04..499021a 100644 --- a/test/specs/read.cfm +++ b/test/specs/read.cfm @@ -367,10 +367,10 @@ describe( "read", function(){ it( "Can return a CSV string from an Excel file", function(){ var path = getTestFilePath( "test.xls" ); - var expected = 'a,b#crlf#1,2015-04-01 00:00:00#crlf#2015-04-01 01:01:01,2'; + var expected = 'a,b#newline#1,2015-04-01 00:00:00#newline#2015-04-01 01:01:01,2'; var actual = s.read( src=path, format="csv" ); expect( actual ).toBe( expected ); - expected = 'a,b#crlf#a,b#crlf#1,2015-04-01 00:00:00#crlf#2015-04-01 01:01:01,2'; + expected = 'a,b#newline#a,b#newline#1,2015-04-01 00:00:00#newline#2015-04-01 01:01:01,2'; actual = s.read( src=path, format="csv", headerRow=1, includeHeaderRow=true ); expect( actual ).toBe( expected ); }); @@ -387,7 +387,7 @@ describe( "read", function(){ it( "Accepts a custom delimiter when generating CSV", function(){ var path = getTestFilePath( "test.xls" ); - var expected = 'a|b#crlf#1|2015-04-01 00:00:00#crlf#2015-04-01 01:01:01|2'; + var expected = 'a|b#newline#1|2015-04-01 00:00:00#newline#2015-04-01 01:01:01|2'; var actual = s.read( src=path, format="csv", csvDelimiter="|" ); expect( actual ).toBe( expected ); }); @@ -804,8 +804,10 @@ describe( "read", function(){ }); afterEach( function(){ - if( FileExists( variables.tempXlsPath ) ) FileDelete( variables.tempXlsPath ); - if( FileExists( variables.tempXlsxPath ) ) FileDelete( variables.tempXlsxPath ); + if( FileExists( variables.tempXlsPath ) ) + FileDelete( variables.tempXlsPath ); + if( FileExists( variables.tempXlsxPath ) ) + FileDelete( variables.tempXlsxPath ); }); }); diff --git a/test/specs/readCsv.cfm b/test/specs/readCsv.cfm index 739c641..0fd6745 100644 --- a/test/specs/readCsv.cfm +++ b/test/specs/readCsv.cfm @@ -36,7 +36,7 @@ describe( "readCsv", function(){ }); it( "allows N rows to be skipped at the start of the file", function(){ - var csv = 'Skip this line#crlf#skip this line too#crlf#"Frumpo McNugget",12345'; + var csv = 'Skip this line#newline#skip this line too#newline#"Frumpo McNugget",12345'; var expected = { columns: [], data: [ [ "Frumpo McNugget", "12345" ] ] }; FileWrite( tempCsvPath, csv ); var actual = s.readCsv( tempCsvPath ) @@ -49,7 +49,7 @@ describe( "readCsv", function(){ describe( "auto header/column handling", function(){ it( "can auto extract the column names from first row if specified", function(){ - var csv = 'name,number#crlf#"Frumpo McNugget",12345'; + var csv = 'name,number#newline#"Frumpo McNugget",12345'; var expected = { columns: [ "name", "number" ], data: [ [ "Frumpo McNugget", "12345" ] ] }; FileWrite( tempCsvPath, csv ); var actual = s.readCsv( tempCsvPath ) @@ -60,7 +60,7 @@ describe( "readCsv", function(){ }); it( "auto extraction treats the first non-skipped row as the header", function(){ - var csv = 'Skip this line#crlf#name,number#crlf#"Frumpo McNugget",12345'; + var csv = 'Skip this line#newline#name,number#newline#"Frumpo McNugget",12345'; var expected = { columns: [ "name", "number" ], data: [ [ "Frumpo McNugget", "12345" ] ] }; FileWrite( tempCsvPath, csv ); var actual = s.readCsv( tempCsvPath ) @@ -72,7 +72,7 @@ describe( "readCsv", function(){ }); it( "adds a manually specified header row to the columns result", function(){ - var csv = 'name,number#crlf#"Frumpo McNugget",12345'; + var csv = 'name,number#newline#"Frumpo McNugget",12345'; var 
expected = { columns: [ "name", "number" ], data: [ [ "Frumpo McNugget", "12345" ] ] }; FileWrite( tempCsvPath, csv ); var actual = s.readCsv( tempCsvPath ) @@ -88,7 +88,7 @@ describe( "readCsv", function(){ describe( "passing UDFs to readCsv", function(){ it( "allows rows to be filtered out of processing using a passed filter UDF", function(){ - var csv = '"Frumpo McNugget",12345#crlf#"Skip",12345#crlf#"Susi Sorglos",67890'; + var csv = '"Frumpo McNugget",12345#newline#"Skip",12345#newline#"Susi Sorglos",67890'; var expected = { columns: [], data: [ [ "Frumpo McNugget", "12345" ], [ "Susi Sorglos", "67890" ] ] }; FileWrite( tempCsvPath, csv ); var filter = function( rowValues ){ @@ -102,7 +102,7 @@ describe( "readCsv", function(){ }); it( "allows rows to be processed using a passed UDF and the processed values returned", function(){ - var csv = '"Frumpo McNugget",12345#crlf#"Susi Sorglos",67890'; + var csv = '"Frumpo McNugget",12345#newline#"Susi Sorglos",67890'; var expected = { columns: [], data: [ [ "XFrumpo McNugget", "X12345" ], [ "XSusi Sorglos", "X67890" ] ] }; FileWrite( tempCsvPath, csv ); var processor = function( rowValues ){ @@ -119,7 +119,7 @@ describe( "readCsv", function(){ }); it( "allows rows to be processed using a passed UDF without returning any data", function(){ - var csv = '10#crlf#10'; + var csv = '10#newline#10'; var expected = 20; FileWrite( tempCsvPath, csv ); variables.tempTotal = 0; @@ -135,7 +135,7 @@ describe( "readCsv", function(){ }); it( "passes the current record number to the processor UDF", function(){ - var csv = '"Frumpo McNugget",12345#crlf#"Susi Sorglos",67890'; + var csv = '"Frumpo McNugget",12345#newline#"Susi Sorglos",67890'; var expected = [ 1, 2 ]; FileWrite( tempCsvPath, csv ); variables.temp = []; diff --git a/test/specs/readLargeFile.cfm b/test/specs/readLargeFile.cfm index 389f3c6..cf67513 100644 --- a/test/specs/readLargeFile.cfm +++ b/test/specs/readLargeFile.cfm @@ -134,10 +134,10 @@ describe( .addRow( headerRow ) .addRow( dataRow ) .write( tempXlsxPath, true ); - var expected = 'header1,header2#crlf#a,2015-04-01 00:00:00'; + var expected = 'header1,header2#newline#a,2015-04-01 00:00:00'; var actual = s.readLargeFile( src=tempXlsxPath, format="csv" ); expect( actual ).toBe( expected ); - expected = 'header1,header2#crlf#header1,header2#crlf#a,2015-04-01 00:00:00'; + expected = 'header1,header2#newline#header1,header2#newline#a,2015-04-01 00:00:00'; actual = s.readLargeFile( src=tempXlsxPath, format="csv", headerRow=1, includeHeaderRow=true ); expect( actual ).toBe( expected ); }); diff --git a/test/specs/workbookFromCsv.cfm b/test/specs/workbookFromCsv.cfm index 2af75e5..cdfc791 100644 --- a/test/specs/workbookFromCsv.cfm +++ b/test/specs/workbookFromCsv.cfm @@ -2,7 +2,7 @@ describe( "workbookFromCsv", function(){ beforeEach( function(){ - variables.csv = 'column1,column2#crlf#"Frumpo McNugget",12345'; + variables.csv = 'column1,column2#newline#"Frumpo McNugget",12345'; variables.basicExpectedQuery = QueryNew( "column1,column2", "", [ [ "Frumpo McNugget", "12345" ] ] ); }); diff --git a/test/specs/write.cfm b/test/specs/write.cfm index ad46827..c24bd13 100644 --- a/test/specs/write.cfm +++ b/test/specs/write.cfm @@ -2,7 +2,7 @@ describe( "write", function(){ beforeEach( function(){ - sleep( 5 );// allow time for file operations to complete + Sleep( 5 );// allow time for file operations to complete }); it( "Writes an XLS object correctly", function(){ @@ -95,8 +95,10 @@ describe( "write", function(){ }); afterEach( function(){ - if( 
FileExists( variables.tempXlsPath ) ) FileDelete( variables.tempXlsPath ); - if( FileExists( variables.tempXlsxPath ) ) FileDelete( variables.tempXlsxPath ); + if( FileExists( variables.tempXlsPath ) ) + FileDelete( variables.tempXlsPath ); + if( FileExists( variables.tempXlsxPath ) ) + FileDelete( variables.tempXlsxPath ); }); }); diff --git a/test/specs/writeCsv.cfm b/test/specs/writeCsv.cfm new file mode 100644 index 0000000..ced95b2 --- /dev/null +++ b/test/specs/writeCsv.cfm @@ -0,0 +1,259 @@ + +describe( "writeCsv", function(){ + + //Note: a trailing newline is always expected when printing from Commons CSV + + it( "writeCsv defaults to the EXCEL predefined format", function(){ + var object = s.writeCsv(); + var format = object.getFormat(); + expect( format.equals( format.EXCEL ) ).toBeTrue(); + }); + + describe( "writeCsv can write a csv file or return a csv string", function(){ + + afterEach( function(){ + if( FileExists( tempCsvPath ) ) + FileDelete( tempCsvPath ); + }); + + it( "from an array of arrays", function(){ + var data = [ [ "a", "b" ], [ "c", "d" ] ]; + var expected = "a,b#newline#c,d#newline#"; + var actual = s.writeCsv() + .fromData( data ) + .execute(); + expect( actual ).toBe( expected ); + s.writeCsv() + .toFile( tempCsvPath ) + .fromData( data ) + .execute(); + actual = FileRead( tempCsvPath ); + expect( actual ).toBe( expected ); + }); + + it( "from an array of structs", function(){ + var data = [ [ first: "Frumpo", last: "McNugget" ] ]; + var expected = "Frumpo,McNugget#newline#"; + var actual = s.writeCsv() + .fromData( data ) + .execute(); + expect( actual ).toBe( expected ); + s.writeCsv() + .toFile( tempCsvPath ) + .fromData( data ) + .execute(); + actual = FileRead( tempCsvPath ); + expect( actual ).toBe( expected ); + }); + + it( "from a query", function(){ + var data = QueryNew( "column1,column2", "VarChar,VarChar", [ [ "a", "b" ], [ "c", "d" ] ] ); + var expected = "a,b#newline#c,d#newline#"; + var actual = s.writeCsv() + .fromData( data ) + .execute(); + expect( actual ).toBe( expected ); + s.writeCsv() + .toFile( tempCsvPath ) + .fromData( data ) + .execute(); + actual = FileRead( tempCsvPath ); + expect( actual ).toBe( expected ); + }); + + }); + + it( "allows an alternative to the default comma delimiter", function(){ + var data = [ [ "a", "b" ], [ "c", "d" ] ]; + var expected = "a|b#newline#c|d#newline#"; + var actual = s.writeCsv() + .fromData( data ) + .withDelimiter( "|" ) + .execute(); + expect( actual ).toBe( expected ); + }); + + it( "has special handling when specifying tab as the delimiter", function(){ + var data = [ [ "a", "b" ], [ "c", "d" ] ]; + var validTabValues = [ "#Chr( 9 )#", "\t", "tab", "TAB" ]; + var expected = "a#Chr( 9 )#b#newline#c#Chr( 9 )#d#newline#"; + for( var delimiter in validTabValues ){ + var actual = s.writeCsv() + .fromData( data ) + .withDelimiter( delimiter ) + .execute(); + expect( actual ).toBe( expected ); + } + }); + + it( "can use the query columns as the header row", function(){ + var data = QueryNew( "column1,column2", "VarChar,VarChar", [ [ "a", "b" ], [ "c", "d" ] ] ); + var expected = "column1,column2#newline#a,b#newline#c,d#newline#"; + var actual = s.writeCsv() + .fromData( data ) + .withQueryColumnsAsHeader() + .execute(); + expect( actual ).toBe( expected ); + }); + + it( "can use the row struct keys as the header row", function(){ + var data = [ [ first: "Frumpo", last: "McNugget" ] ]; + var expected = "first,last#newline#Frumpo,McNugget#newline#"; + var actual = s.writeCsv() + .fromData( data ) + 
.withStructKeysAsHeader() + .execute(); + expect( actual ).toBe( expected ); + }); + + it( "outputs integers correctly with no decimal point", function(){ + var arrayData = [ [ 123 ] ]; + var queryData = QueryNew( "column1", "Integer", arrayData ); + var expected = "123#newline#"; + expect( s.writeCsv().fromData( arrayData ).execute() ).toBe( expected ); + expect( s.writeCsv().fromData( queryData ).execute() ).toBe( expected ); + }); + + it( "outputs date objects using the instance's specified DATETIME format", function(){ + var nowAsText = DateTimeFormat( Now(), s.getDateFormats().DATETIME ); + var arrayData = [ [ ParseDateTime( nowAsText ) ] ]; + var queryData = QueryNew( "column1", "Timestamp", arrayData ); + var expected = "#nowAsText##newline#"; + expect( s.writeCsv().fromData( arrayData ).execute() ).toBe( expected ); + expect( s.writeCsv().fromData( queryData ).execute() ).toBe( expected ); + }); + + it( "does NOT treat date strings as date objects to be formatted using the DATETIME format", function(){ + var dateString = "2022-12-18"; + var data = [ [ dateString ] ]; + var expected = '#dateString##newline#'; + expect( s.writeCsv().fromData( data ).execute() ).toBe( expected ); + }); + + it( "can handle an embedded delimiter", function(){ + var data = [ [ "a,a", "b" ], [ "c", "d" ] ]; + var expected = '"a,a",b#newline#c,d#newline#'; + expect( s.writeCsv().fromData( data ).execute() ).toBe( expected ); + }); + + it( "can handle an embedded double-quote", function(){ + var data = [ [ "a""a", "b" ], [ "c", "d" ] ]; + var expected = '"a""a",b#newline#c,d#newline#'; + expect( s.writeCsv().fromData( data ).execute() ).toBe( expected ); + }); + + it( "can handle an embedded carriage return", function(){ + var data = [ [ "a#newline#a", "b" ], [ "c", "d" ] ]; + var expected = '"a#newline#a",b#newline#c,d#newline#'; + expect( s.writeCsv().fromData( data ).execute() ).toBe( expected ); + }); + + it( + title="can process rows in parallel if the engine supports it" + ,body=function(){ + //can't test if using threads, just that there are no errors + var data = QueryNew( "column1,column2", "VarChar,VarChar", [ [ "a", "a" ], [ "a", "a" ] ] ); + var expected = "a,a#newline#a,a#newline#";//same values because order is not guaranteed + var actual = s.writeCsv() + .fromData( data ) + .withParallelThreads( 2 ) + .execute(); + expect( actual ).toBe( expected ); + } + ,skip=function(){ + //20231031: ACF 2021 and 2023 won't run the whole suite if this test is included: testbox errors thrown + //running just the queryToCsv tests works fine though. Lucee is fine with the whole suite. 
+ return s.getIsACF();
+ }
+ );
+
+ it( "allows Commons CSV format options to be applied", function(){
+ var path = getTestFilePath( "test.csv" );
+ var object = s.writeCsv()
+ .withAutoFlush()
+ .withCommentMarker( "##" )
+ .withDelimiter( "|" )
+ .withDuplicateHeaderMode( "ALLOW_EMPTY" )
+ .withEscapeCharacter( "\" )
+ .withHeader( [ "Name", "Number" ] )
+ .withHeaderComments( [ "comment1", "comment2" ] )
+ .withIgnoreEmptyLines()
+ .withIgnoreHeaderCase()
+ .withIgnoreSurroundingSpaces()
+ .withNullString( "" )
+ .withQuoteCharacter( "'" )
+ .withQuoteMode( "NON_NUMERIC" )
+ .withSkipHeaderRecord()
+ .withTrailingDelimiter()
+ .withTrim();
+ expect( object.getFormat().getAutoFlush() ).toBeTrue();
+ expect( object.getFormat().getCommentMarker() ).toBe( "##" );
+ expect( object.getFormat().getDelimiterString() ).toBe( "|" );
+ expect( object.getFormat().getDuplicateHeaderMode().name() ).toBe( "ALLOW_EMPTY" );
+ expect( object.getFormat().getEscapeCharacter() ).toBe( "\" );
+ expect( object.getFormat().getHeader() ).toBe( [ "Name", "Number" ] );
+ expect( object.getFormat().getHeaderComments() ).toBe( [ "comment1", "comment2" ] );
+ expect( object.getFormat().getIgnoreEmptyLines() ).toBeTrue();
+ expect( object.getFormat().getIgnoreHeaderCase() ).toBeTrue();
+ expect( object.getFormat().getIgnoreSurroundingSpaces() ).toBeTrue();
+ expect( object.getFormat().getNullString() ).toBe( "" );
+ expect( object.getFormat().getQuoteCharacter() ).toBe( "'" );
+ expect( object.getFormat().getQuoteMode().name() ).toBe( "NON_NUMERIC" );
+ expect( object.getFormat().getSkipHeaderRecord() ).toBeTrue();
+ expect( object.getFormat().getTrailingDelimiter() ).toBeTrue();
+ expect( object.getFormat().getTrim() ).toBeTrue();
+ //reverse check in case any of the above were defaults
+ object
+ .withAutoFlush( false )
+ .withDuplicateHeaderMode( "ALLOW_ALL" )
+ .withIgnoreEmptyLines( false )
+ .withIgnoreHeaderCase( false )
+ .withIgnoreSurroundingSpaces( false )
+ .withQuoteMode( "MINIMAL" )
+ .withSkipHeaderRecord( false )
+ .withTrailingDelimiter( false )
+ .withTrim( false );
+ expect( object.getFormat().getAutoFlush() ).toBeFalse();
+ expect( object.getFormat().getDuplicateHeaderMode().name() ).toBe( "ALLOW_ALL" );
+ expect( object.getFormat().getIgnoreEmptyLines() ).toBeFalse();
+ expect( object.getFormat().getIgnoreHeaderCase() ).toBeFalse();
+ expect( object.getFormat().getIgnoreSurroundingSpaces() ).toBeFalse();
+ expect( object.getFormat().getQuoteMode().name() ).toBe( "MINIMAL" );
+ expect( object.getFormat().getSkipHeaderRecord() ).toBeFalse();
+ expect( object.getFormat().getTrailingDelimiter() ).toBeFalse();
+ expect( object.getFormat().getTrim() ).toBeFalse();
+ });
+
+ describe( "writeCsv() throws an exception if", function(){
+
+ it( "the data is not an array or query", function(){
+ expect( function(){
+ var data = "string";
+ s.writeCsv().fromData( data ).execute();
+ }).toThrow( type="cfsimplicity.spreadsheet.invalidDataForCsv" );
+ });
+
+ it( "the data contains complex values", function(){
+ expect( function(){
+ var complexValue = [];
+ var data = [ [ complexValue ] ];
+ s.writeCsv().fromData( data ).execute();
+ }).toThrow( type="cfsimplicity.spreadsheet.invalidDataForCsv" );
+ });
+
+ it(
+ title="parallel threads are specified and the engine does not support it"
+ ,body=function(){
+ expect( function(){
+ s.writeCsv().withParallelThreads();
+ }).toThrow( type="cfsimplicity.spreadsheet.parallelOptionNotSupported" );
+ }
+ ,skip=function(){
+ return s.engineSupportsParallelLoopProcessing();
+
} + ); + + }); + +}); + \ No newline at end of file diff --git a/test/specs/writeFileFromQuery.cfm b/test/specs/writeFileFromQuery.cfm index 39f9a10..43cfc30 100644 --- a/test/specs/writeFileFromQuery.cfm +++ b/test/specs/writeFileFromQuery.cfm @@ -2,7 +2,7 @@ describe( "writeFileFromQuery", function(){ beforeEach( function(){ - sleep( 5 );// allow time for file operations to complete + Sleep( 5 );// allow time for file operations to complete variables.query = QueryNew( "Header1,Header2","VarChar,VarChar",[ [ "a","b" ],[ "c","d" ] ] ); }); @@ -25,12 +25,15 @@ describe( "writeFileFromQuery", function(){ s.writeFileFromQuery( data=query, filepath=tempXlsPath, overwrite=true, xmlFormat=true ); var workbook = s.read( convertedPath ); expect( workbook.getClass().name ).toBe( "org.apache.poi.xssf.usermodel.XSSFWorkbook" ); - if( FileExists( convertedPath ) ) FileDelete( convertedPath ); + if( FileExists( convertedPath ) ) + FileDelete( convertedPath ); }); afterEach( function(){ - if( FileExists( variables.tempXlsPath ) ) FileDelete( variables.tempXlsPath ); - if( FileExists( variables.tempXlsxPath ) ) FileDelete( variables.tempXlsxPath ); + if( FileExists( variables.tempXlsPath ) ) + FileDelete( variables.tempXlsPath ); + if( FileExists( variables.tempXlsxPath ) ) + FileDelete( variables.tempXlsxPath ); }); }); diff --git a/test/specs/writeToCsv.cfm b/test/specs/writeToCsv.cfm index 79c3bba..bd5b528 100644 --- a/test/specs/writeToCsv.cfm +++ b/test/specs/writeToCsv.cfm @@ -2,8 +2,8 @@ describe( "writeToCsv", function(){ beforeEach( function(){ - sleep( 5 );// allow time for file operations to complete - var data = QueryNew( "column1,column2", "VarChar,VarChar", [ [ "a","b" ], [ "c","d" ] ] ); + Sleep( 5 );// allow time for file operations to complete + var data = [ [ "a", "b" ], [ "c", "d" ] ]; variables.workbooks = [ s.newXls(), s.newXlsx() ]; workbooks.Each( function( wb ){ s.addRows( wb, data ); @@ -11,7 +11,7 @@ describe( "writeToCsv", function(){ }); it( "writes a csv file from a spreadsheet object", function(){ - var expectedCsv = 'a,b#crlf#c,d'; + var expectedCsv = 'a,b#newline#c,d'; workbooks.Each( function( wb ){ s.writeToCsv( wb, tempCsvPath, true ); expect( FileRead( tempCsvPath ) ).toBe( expectedCsv ); @@ -19,7 +19,7 @@ describe( "writeToCsv", function(){ }); it( "is chainable", function(){ - var expectedCsv = 'a,b#crlf#c,d'; + var expectedCsv = 'a,b#newline#c,d'; workbooks.Each( function( wb ){ s.newChainable( wb ).writeToCsv( tempCsvPath, true ); expect( FileRead( tempCsvPath ) ).toBe( expectedCsv ); @@ -27,7 +27,7 @@ describe( "writeToCsv", function(){ }); it( "allows an alternative delimiter", function(){ - var expectedCsv = 'a|b#crlf#c|d'; + var expectedCsv = 'a|b#newline#c|d'; workbooks.Each( function( wb ){ s.writeToCsv( wb, tempCsvPath, true, "|" ); expect( FileRead( tempCsvPath ) ).toBe( expectedCsv ); @@ -35,7 +35,7 @@ describe( "writeToCsv", function(){ }); it( "allows the sheet's header row to be excluded", function(){ - var expectedCsv = 'a,b#crlf#c,d'; + var expectedCsv = 'a,b#newline#c,d'; workbooks.Each( function( wb ){ s.addRow( wb, [ "column1", "column2" ], 1 ) .writeToCsv( workbook=wb, filepath=tempCsvPath, overwrite=true, includeHeaderRow=false ); @@ -61,7 +61,8 @@ describe( "writeToCsv", function(){ }); afterEach( function(){ - if( FileExists( tempCsvPath ) ) FileDelete( tempCsvPath ); + if( FileExists( tempCsvPath ) ) + FileDelete( tempCsvPath ); }); }); diff --git a/test/suite.cfc b/test/suite.cfc index c708377..622ac61 100644 --- a/test/suite.cfc +++ 
b/test/suite.cfc @@ -9,12 +9,14 @@ component extends="testbox.system.BaseSpec"{ variables.s = newSpreadsheetInstance(); function beforeAll(){ - if( !s.getIsACF() ) s.flushOsgiBundle(); - if( server.KeyExists( s.getJavaLoaderName() ) ) server.delete( s.getJavaLoaderName() ); + if( !s.getIsACF() ) + s.flushOsgiBundle(); + if( server.KeyExists( s.getJavaLoaderName() ) ) + server.delete( s.getJavaLoaderName() ); variables.tempXlsPath = ExpandPath( "temp.xls" ); variables.tempXlsxPath = ExpandPath( "temp.xlsx" ); variables.tempCsvPath = ExpandPath( "temp.csv" ); - variables.crlf = Chr( 13 ) & Chr( 10 ); + variables.newline = Chr( 13 ) & Chr( 10 ); variables.spreadsheetTypes = [ "Xls", "Xlsx" ]; }