export [<TABLE_VIEW_QUERY_PROC>] [--schema INPUT] [--exclude-views] [--exclude-tables] [-c|--connstr CONNECTION_STRING] [-n|--credname ENTRY_NAME] [--culture VALUE] [--excel-modify-password VALUE] [--excel-open-password VALUE] [--excel-preserve-target-format] [--excel-treat-value-as-formula] [--excel-clear-target-sheet] [--excel-use-report-template] [--excel-report-template-relations VALUE] [--excel-report-template-path FILE_PATH] [--single-dataset] [--no-null-prop] [--no-empty-array] [--no-empty-nested-prop] [--xml-prefix PREFIX] [--xml-namespaces KEY_VALUE_PAIRS] [--xml-row-element NAME] [--xml-root-element NAME] [--xml-standalone VALUE] [--xml-no-declare] [--enable-custom-replace] [--custom-replace-rules RULES] [-m|--map MAP_FILE_PATH] [-o|--out FILE_PATH] [--print-landscape] [--print-papersize {Undefined|Letter|Lettersmall|Tabloid|Ledger|Legal|Statement|Executive|A3|A4|A4small|A5|B4_JIS|B5_JIS|Folio|Quarto|s10x14|s11x17|Note|Envelope9|Envelope10|Envelope11|Envelope12|Envelope14|C|D|E|EnvelopeDL|EnvelopeC5|EnvelopeC3|EnvelopeC4|EnvelopeC6|EnvelopeC6_C5|B4_ISO|B5_ISO|B6_ISO|EnvelopeItaly|EnvelopeMonarch|s63_4Envelope|USStandardFanfold|GermanStdFanfold|GermanLegalFanfold|B4_ISO_2|JapanesePostcard|s9x11|s10x11|s15x11|EnvelopeInvite|LetterExtra|LegalExtra|TabloidExtra|A4Extra|LetterTransverse|A4Transverse|LetterExtraTransv|SuperA_A4|SuperB_A3|LetterPlus|A4Plus|A5Transverse|B5_JIS_Transverse|A3Extra|A5Extra|B5_ISO_Extra|A2|A3Transverse|A3ExtraTransverse|DblJapanesePostcard|A6|LetterRotated|A3Rotated|A4Rotated|A5Rotated|B4_JIS_Rotated|B5_JIS_Rotated|JapanesePostcardRot|DblJapPostcardRot|A6Rotated|B6_JIS|B6_JIS_Rotated|s12x11}] [--append] [--csv-rowsep SEP_CHAR] [--nohdr] [--csv-colsep SEP_CHAR] [--groupby COLUMN_NAME] [--gzip] [--keep-source] [--csv-quote] [--no-csv-quote] [--throw-err-if-nodata] [--cmd-timeout TIMEOUT_SEC] [--enc {Default|ASCII|UTF8|UTF16|UTF32|UTF8WithoutBOM|UTF32WithoutBOM|UTF7|UTF7WithoutBOM|UTF16WithoutBOM|BigEndian|BigEndianWithoutBOM}] [--enc-ascii] [--enc-utf7] [--enc-utf8] [--enc-utf8-nobom] [--enc-utf16] [--enc-utf16-nobom] [--enc-utf32] [--charset INPUT] [--source-split-char INPUT] [--dateformat INPUT] [--split] [-y|--overwrite] [--splitsize FILE_SIZE] [--splitrows ROW_COUNT] [--maxrows ROW_COUNT] [--excel-sheetname NAME] [--excel-offset CELL_LOCATION] [--excel-clear-range RANGE] [--excel-saveas-multi-tab] [--excel-split-ontab] [--json-multi-content] [--json-array] [--pretty-print] [--excel-ver EXCEL_VERSION] [--csv] [--always-quote] [--json] [--xml] [--excel] [--pdf] [--html] [--consoleout FILEPATH] [-v|--verbose] [-w|--wait] [-i|--interactive]
Parameter | Description |
---|---|
<TABLE_VIEW_QUERY_PROC> |
Source for the data. This can be one or more tables, views, queries, or a stored procedure. If it is something other than a table or view, you must specify the --proc or --query parameter to indicate the access mode. You may list multiple objects by separating them with a vertical bar (e.g. table1|table2|view1). You can also use a pattern for object names (e.g. cust%|prod%) |
--schema |
Schema name or pattern to search for objects when the object name contains a pattern (e.g. Cust%) |
--exclude-views |
If a pattern is used for the object name, do not include matching views in the export. |
--exclude-tables |
If a pattern is used for the object name, do not include matching tables in the export. |
-c | --connstr CONNECTION_STRING |
Source connection string. This can be an ODBC, OLEDB, or ADO.NET connection string. NOTE: For the ADO.NET driver, only SQL Server is supported. |
-n | --credname ENTRY_NAME |
Entry name of the credential you saved earlier using the setauth command (see the saved-credential example at the end of the Examples section). |
--culture VALUE |
Sets the culture for numeric formatting (e.g. de-DE). |
--excel-modify-password VALUE |
Sets the password required to modify the Excel file. Any time a user tries to save the file, they will be prompted for this password. |
--excel-open-password VALUE |
Sets the password required to open the Excel file. Any time a user opens the file, they will be prompted for this password. |
--excel-preserve-target-format |
Preserve target cell format when clear target sheet option is turned on. |
--excel-treat-value-as-formula |
Set this option if your values may contain formulas. When this option is enabled, any value starting with an = sign is treated as a formula (e.g. =A1+C1) |
--excel-clear-target-sheet |
When this option is enabled, all used cells in the target sheet are cleared before any data is written. |
--excel-use-report-template |
Output excel file using report template approach (formatting, expressions, datasets). See this link for more information https://zappysys.com/links/?url=https://zappysys.com/blog/create-excel-report-in-ssis-using-template/ |
--excel-report-template-relations VALUE |
Relationships for master-child datasets in an Excel report template. Use this option to define related datasets for master-child bands in the template. Example: cust.CustomerId=ord.CustomerId. For multiple tables, specify semicolon-separated key-value pairs (parentTbl1.IdCol=childTbl1.IdCol;parentTbl2.IdCol=childTbl2.IdCol;). This option is ignored unless --excel-use-report-template is specified (see the report-template sketch after this table) |
--excel-report-template-path FILE_PATH |
Excel report template file path |
--single-dataset |
|
--no-null-prop |
Do not output a property if its value is NULL. By default, NULL values are also written to the output (e.g. {"Email": null} for JSON --OR-- <email xsi:nil="true"></email> for XML) |
--no-empty-array |
Set this option if you don't want to output empty array nodes when no records are found for that array (e.g. "Orders": [ ] for JSON --OR-- <Orders></Orders> for XML). |
--no-empty-nested-prop |
Set this option if you don't want to output empty nodes for sub-documents (i.e. nested properties). This option is only applicable if --no-null-prop is enabled. If all child properties are NULL for a parent node, the output contains empty nodes by default (e.g. CountryInfo: { CityInfo: { } } for JSON --OR-- <CountryInfo><CityInfo /></CountryInfo> for XML). Enable this option to avoid such empty nodes in the output. |
--xml-prefix PREFIX |
Prefix for the root element. If you don't use a custom layout, this prefix is applied to all elements. For example, if you want to output an element like <ns1:Root> then set the prefix to ns1 here. |
--xml-namespaces KEY_VALUE_PAIRS |
Prefixes and namespaces you would like to use for your XML attributes/elements. You can enter multiple prefix/namespace URL pairs as comma-separated key-value pairs. Example: ns1=http://abc.com/schema1,ns2=http://abc.com/schema2 (see the XML export sketch after this table) |
--xml-row-element NAME |
Row element name for each data row. This is not applicable if you use custom layout. |
--xml-root-element NAME |
Document root node (First node after XML declaration) |
--xml-standalone VALUE |
Standalone attribute for XML declaration (Leave blank to exclude this attribute). This attribute is usually used when you have DTD validation in XML which is not very common. |
--xml-no-declare |
Controls whether the XML declaration node (e.g. <?xml version="1.0" ?>) is output; set this option to omit it. |
--enable-custom-replace |
Enable this option to define custom replacement rules (see also --custom-replace-rules and the replacement-rules sketch after this table). A replacement rule contains key=value pairs separated by semicolons. You can hard-code a character directly or use a designated keyword (supported keywords are {doublequote}, {semicolon}, {tab}, {cr}, {lf}, {null}). You can also use hex characters in the search part (e.g. \xff44=something). Use a blank value in the second part to replace with blank. Example: {null}=\0;"="";{semicolon}=\{semicolon};{cr}=\r;{lf}=\n;{tab}=\t;{columndelimiter}=\{columndelimiter};\xff32=;\xff31=;{null}=; |
--custom-replace-rules RULES |
Define custom replacement rules to search for and replace certain characters. This option is only valid when --enable-custom-replace is specified. Each rule is a key=value pair, and multiple rules are separated by semicolons (i.e. ; ). In each key=value pair, the left side defines the character to search for and the right side the replacement value. You can hard-code a character directly or use a designated keyword (supported keywords are {doublequote}, {semicolon}, {tab}, {cr}, {lf}, {null}). You can also use hex characters in the search part (e.g. \xff44=something). Use a blank value in the second part to replace with blank. Example: {null}=\0;"="";{semicolon}=\{semicolon};{cr}=\r;{lf}=\n;{tab}=\t;{columndelimiter}=\{columndelimiter};\xff32=;\xff31=;{null}=; |
-m | --map MAP_FILE_PATH |
Map file path which contains additional mapping/layout information (see the map file example below) |
-o | --out FILE_PATH |
Target file path |
--print-landscape |
Set the printer mode to landscape when exporting the file. Only applicable to HTML, PDF, and Excel files |
--print-papersize TYPE |
Printer paper size (e.g. A4, Letter, Legal). Only applicable to HTML, PDF, and Excel files |
--append |
Append data at the end of file |
--csv-rowsep SEP_CHAR |
Row separator for data files (if not specified, a new line is used). (NOTE: use \t for tab, \r\n for new line (this is the default), \r for carriage return, \n for line feed) |
--nohdr |
Do not write a header row with column names as the first row. By default, the first row is a header row if this option is not specified. This option is only valid when the output file format is Excel or CSV. |
--csv-colsep SEP_CHAR |
Field separator character for data files. (NOTE: use \t for tab) |
--groupby COLUMN_NAME |
Split files by the value of a group-by column. E.g. if you group by the Country field, you get one file for each country, and the country name is used as the file name suffix. If you also specify the --split option, you may get more than one file for each value of the group-by field, depending on the split size/rows you specify |
--gzip |
Compress file in gzip format |
--keep-source |
Keep the source file after compressing it when the --gzip option is enabled. By default, the source file is deleted once it is compressed. |
--csv-quote |
Wrap quotes around value in data files. Default is true |
--no-csv-quote |
Disable quotes around value in data files |
--throw-err-if-nodata |
When this setting is enabled, the source data must have one or more records, otherwise the task fails with an error |
--cmd-timeout TIMEOUT_SEC |
Command Timeout in seconds for source query. 0 means no timeout. |
--enc TYPE |
File encoding |
--enc-ascii |
Use ASCII encoding for files |
--enc-utf7 |
Use UTF7 encoding for files |
--enc-utf8 |
Use UTF8 encoding for files. This is default encoding type. |
--enc-utf8-nobom |
Use UTF8 encoding (without BOM bytes) for files. |
--enc-utf16 |
Use UTF16 encoding for files |
--enc-utf16-nobom |
Use UTF16 encoding (without BOM bytes) for files. |
--enc-utf32 |
Use UTF32 encoding for files |
--charset INPUT |
Character set identifier or codepage number for the file |
--source-split-char INPUT |
Character which splits multiple source queries |
--dateformat INPUT |
Date format for CSV or JSON data files (e.g. YYYY-MM-DD HH24:MI:SS.MI) |
--split |
Split into multiple files (when split enabled you can split by filesize or rowcount. Check --splitsize and --splitrows options). |
-y | --overwrite |
Overwrite target file if exists |
--splitsize FILE_SIZE |
Maximum size per file. You can specify in bytes,kb,mb or gb. Example: 10mb, 5000kb, 1gb, 2000bytes |
--splitrows ROW_COUNT |
Maximum rows per file |
--maxrows ROW_COUNT |
Maximum number of rows to export. |
--excel-sheetname NAME |
Sheet name of excel workbook where you want to export data |
--excel-offset CELL_LOCATION |
Excel cell location from where you want to start writing data (e.g. if you wish to start writing at the 3rd row of the 2nd column, use B3). Default is A1, i.e. first row, first column |
--excel-clear-range RANGE |
Range you want to clear before writing data (example: A1:M25). This is helpful when you are writing data to an existing sheet and want to make sure no previous content is left in the specified area (see the sheet-offset sketch after this table). |
--excel-saveas-multi-tab |
Set this option if you want to save multiple tables as separate tabs rather than splitting them into files. For example, to save all tables matching _stg% as tabs, set the table name to _stg% and enable this option. One tab is created for each table. |
--excel-split-ontab |
When multiple tables or the --groupby option are specified along with this option, data is split across tabs rather than into a separate file for each group. E.g. if you set --groupby Country along with --excel-split-ontab, you will see a tab for each country in the Excel workbook. |
--json-multi-content |
Output records as multi-content JSON where each JSON record is appended to the file without commas or opening/closing array brackets (e.g. {...}{...}). If you don't specify this option, the default is array format (e.g. [{...},{...}]) |
--json-array |
Output records in Array format (e.g. [{...},{...}] ) |
--pretty-print |
Indent output (i.e. Pretty Print) |
--excel-ver EXCEL_VERSION |
File version for excel. Default value is 2007 (e.g. xlsx format of Excel 2007) which supports more than 65000 rows and many new features. If you need backward compatibility for some reason then use 2003 format. Possible values are [2003|2007|2010|2013] |
--csv |
Export data in CSV format |
--always-quote |
Always quote values in CSV regardless of datatype (by default, numeric data is not quoted; this can be a problem with some number formats where a comma is used as the decimal separator) |
--json |
Export data in JSON |
--xml |
Export data in XML format |
--excel |
Export data in Excel format. Use the --excel-ver option to specify the file format |
--pdf |
Export data in PDF format. |
--html |
Export data in HTML format. |
--consoleout FILEPATH |
Captures console output and saves to file |
-v | --verbose |
Log verbosity level. -v enables detailed logging. If you don't specify this option, minimal logging is done. |
-w | --wait |
Wait for user input before exit |
-i | --interactive |
Continue in interactive mode after this command completes. This switch is not applicable if you are already in interactive mode |
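For illustration, here is a hedged sketch of the XML output options described above (object name, output path, and namespace URL are placeholder values; only flags documented in this table are used). It exports the Customers table as XML with a custom root element, row element, prefix, and namespace:

export customers --xml -o c:\data\customers.xml --xml-root-element Customers --xml-row-element Customer --xml-prefix ns1 --xml-namespaces ns1=http://abc.com/schema1 --connstr "Data Source=localhost;Initial Catalog=Northwind;Integrated Security=SSPI;" -y

With these options the output would be expected to contain a <ns1:Customers> document root and one <ns1:Customer> element per data row.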
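A hedged sketch of the Excel report-template options (--excel-use-report-template, --excel-report-template-path, --excel-report-template-relations). The template path c:\templates\SalesTemplate.xlsx is hypothetical, and the cust/ord aliases follow the relations example given above; they must correspond to the datasets defined in your template/map setup:

export "Customers|Orders" --excel -o c:\data\sales_report.xlsx --excel-use-report-template --excel-report-template-path c:\templates\SalesTemplate.xlsx --excel-report-template-relations "cust.CustomerID=ord.CustomerID" --connstr "Data Source=localhost;Initial Catalog=Northwind;Integrated Security=SSPI;" -y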
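A hedged sketch of the custom replacement rules (--enable-custom-replace with --custom-replace-rules). This particular rule string is built only from the documented keywords and blank-replacement syntax; it strips carriage returns and line feeds and replaces NULL characters with blanks. Adjust the rules to your data:

export customers --csv -o c:\data\customers_clean.csv --enable-custom-replace --custom-replace-rules "{cr}=;{lf}=;{null}=;" --connstr "Data Source=localhost;Initial Catalog=Northwind;Integrated Security=SSPI;" -y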
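A hedged sketch of writing into an existing workbook at a specific location using --excel-sheetname, --excel-offset, and --excel-clear-range (sheet name Data, offset B3, and range B3:M500 are placeholder values):

export customers --excel -o c:\data\report.xlsx --excel-sheetname Data --excel-offset B3 --excel-clear-range B3:M500 --connstr "Data Source=localhost;Initial Catalog=Northwind;Integrated Security=SSPI;" -y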
To export a JSON file with a nested structure you have to provide a map file. Below is an example of a map file which shows how to include multiple datasets, arrays, joins, nested elements, etc.
SampleMapFile.xml
<?xml version="1.0" encoding="UTF-8"?>
<settings ver="1">
  <dataset id="cust" main="true">Customers</dataset>
  <dataset id="ord">Orders</dataset>
  <dataset id="emp">Employees</dataset>
  <join parentid="cust" childid="ord" keys="CustomerID" />
  <join parentid="ord" childid="emp" keys="EmployeeID" />
  <map src="CustomerID" name="ID" />
  <map src="CompanyName" />
  <map name="Contact">
    <map src="Phone" />
    <map src="Fax" />
    <map name="Location">
      <map src="Country" />
      <map src="Region" />
    </map>
  </map>
  <map name="OrderIDList" dataset="ord" maptype="valuearray" src="OrderID"/>
  <map name="Orders" dataset="ord">
    <map src="CustomerID" />
    <map src="OrderID" />
    <map src="OrderDate" />
    <map name="EmpInfo" dataset="emp">
      <map src="EmployeeID" />
      <map src="FirstName" />
    </map>
  </map>
</settings>
export --map c:\MySampleMap.xml --out c:\data\customers.json --connstr "Driver={MySQL ODBC 5.2 ANSI Driver};Server=your_server_name_or_ip;Port=3306;Uid=user1;pwd=SomeP@ssword;Database=your_db_name"
Here is a sample JSON file exported using the above map file:
[ { "ID": "ALFKI", "CompanyName": "Alfreds Futterkiste", "Contact": { "Phone": "030-0074321", "Fax": "030-0076545", "Location": { "Country": "Germany", "Region": null } }, "OrderIDList": [ 10643, 10692, 10702, 10835, 10952, 11011 ], "Orders": [ { "CustomerID": "ALFKI", "OrderID": 10643, "OrderDate": "1997-08-25", "EmpInfo": [ { "EmployeeID": 6, "FirstName": "Michael" } ] }, { "CustomerID": "ALFKI", "OrderID": 10702, "OrderDate": "1997-10-13", "EmpInfo": [ { "EmployeeID": 4, "FirstName": "Margaret" } ] }, { "CustomerID": "ALFKI", "OrderID": 11011, "OrderDate": "1998-04-09", "EmpInfo": [ { "EmployeeID": 3, "FirstName": "Janet" } ] } ] }, { "ID": "ANATR", "CompanyName": "Ana Trujillo Emparedados y helados", "Contact": { "Phone": "(5) 555-4729", "Fax": "(5) 555-3745", "Location": { "Country": "Mexico", "Region": null } }, "OrderIDList": [ 10308, 10625, 10759, 10926 ], "Orders": [ { "CustomerID": "ANATR", "OrderID": 10308, "OrderDate": "1996-09-18", "EmpInfo": [ { "EmployeeID": 7, "FirstName": "Robert" } ] }, { "CustomerID": "ANATR", "OrderID": 10625, "OrderDate": "1997-08-08", "EmpInfo": [ { "EmployeeID": 3, "FirstName": "Janet" } ] } ] }
Examples |
---|
Export SQL Server table to CSV file (using Windows authentication): export customers -o c:\data\cust.csv --connstr "Data Source=localhost;Initial Catalog=Northwind;Integrated Security=SSPI;" -y |
Export SQL Server table to Excel file: export customers -o c:\data\cust.xlsx --connstr "Data Source=localhost;Initial Catalog=Northwind;Integrated Security=SSPI;" -y |
Export SQL Server table to Excel file, splitting data across multiple tabs grouped by a specific field (e.g. Country): export customers -o c:\data\cust.xlsx --groupby Country --excel-split-ontab --connstr "Data Source=localhost;Initial Catalog=Northwind;Integrated Security=SSPI;" -y |
Export SQL Server table to JSON file: export customers -o c:\data\cust.json --connstr "Data Source=localhost;Initial Catalog=Northwind;Integrated Security=SSPI;" -y |
Export SQL Server table to JSON file in multi-content format: export customers -o c:\data\cust.json --json-multi-content --connstr "Data Source=localhost;Initial Catalog=Northwind;Integrated Security=SSPI;" -y |
Export SQL Server table to PDF file: export customers -o c:\data\cust.pdf --connstr "Data Source=localhost;Initial Catalog=Northwind;Integrated Security=SSPI;" -y |
Export SQL Server table to HTML file: export customers -o c:\data\cust.html --connstr "Data Source=localhost;Initial Catalog=Northwind;Integrated Security=SSPI;" -y |
Export multiple SQL Server tables to CSV files (using Windows authentication): export "customers|orders|products" -o c:\data\ --connstr "Data Source=localhost;Initial Catalog=Northwind;Integrated Security=SSPI;" -y |
Export multiple tables using a name pattern (all tables starting with _stg will be exported to CSV): export "_stg%" --exclude-views -o c:\data\ --connstr "Data Source=localhost;Initial Catalog=Northwind;Integrated Security=SSPI;" -y |
Export multiple tables using a name pattern from the HR schema (all tables starting with _stg will be exported to CSV). You can use a pattern in --schema too: export "_stg%" --schema HR --exclude-views -o c:\data\ --connstr "Data Source=localhost;Initial Catalog=Northwind;Integrated Security=SSPI;" -y |
Export query to CSV: export "select * from customers where country='USA'" -o c:\data\us_customers.csv --connstr "Data Source=localhost;Initial Catalog=Northwind;Integrated Security=SSPI;" -y |
Export multiple queries to CSV. When you have multiple files, the file name is suffixed with a counter, e.g. file1.csv, file2.csv: export "select * from customers where country='USA'|select * from orders" -o c:\data\file.csv --connstr "Data Source=localhost;Initial Catalog=Northwind;Integrated Security=SSPI;" -y |
Export table to CSV file with timestamp placeholders in the file name: export "customers" -o c:\data\customers_{{YYYY}}_{{MM}}_{{DD}}_{{HH24}}{{MI}}{{SS}}.csv --connstr "Data Source=localhost;Initial Catalog=Northwind;Integrated Security=SSPI;" -y |
Append data to an existing file: export "select * from customers" --append -o c:\data\customers.csv --connstr "Data Source=localhost;Initial Catalog=Northwind;Integrated Security=SSPI;" -y |
Create tab-delimited file: export "customers" --csv-colsep \t -o c:\data\customers.csv --connstr "Data Source=localhost;Initial Catalog=Northwind;Integrated Security=SSPI;" -y |
Create CSV file without header: export "customers" --nohdr -o c:\data\customers.csv --connstr "Data Source=localhost;Initial Catalog=Northwind;Integrated Security=SSPI;" -y |
Export and split data into multiple CSV files with a maximum of 100 rows per file: export "orders" --split --splitrows 100 -o c:\data\customers.csv --connstr "Data Source=localhost;Initial Catalog=Northwind;Integrated Security=SSPI;" -y |
Export and split data into multiple CSV files with a maximum of 10KB per file: export "orders" --split --splitsize 10KB -o c:\data\customers.csv --connstr "Data Source=localhost;Initial Catalog=Northwind;Integrated Security=SSPI;" -y |
Export and split data into multiple files based on a group-by field (e.g. if we group by Country then we get one file per country): export "Customers" --groupby Country -o c:\data\customers.csv --connstr "Data Source=localhost;Initial Catalog=Northwind;Integrated Security=SSPI;" -y |
Export and split data into multiple files based on a group-by field and a maximum size per file (grouped by Country, split further when a file exceeds 10MB): export "Customers" --groupby Country --split --splitsize 10MB -o c:\data\customers.csv --connstr "Data Source=localhost;Initial Catalog=Northwind;Integrated Security=SSPI;" -y |
Export compressed data in GZip format: export "Customers" --groupby Country -o c:\data\customers.csv --gzip --connstr "Data Source=localhost;Initial Catalog=Northwind;Integrated Security=SSPI;" -y |
Export compressed data in GZip format and keep the original too: export "Customers" --groupby Country -o c:\data\customers.csv --gzip --keep-source --connstr "Data Source=localhost;Initial Catalog=Northwind;Integrated Security=SSPI;" -y |
Export in ASCII encoding rather than UTF8 (the default) to reduce file size. Use this only if you are not using any Unicode characters: export "Customers" --enc-ascii -o c:\data\customers.csv --connstr "Data Source=localhost;Initial Catalog=Northwind;Integrated Security=SSPI;" -y |
Export MySQL data to JSON file (example using the MySQL 5.2 ODBC driver): export customers --out c:\data\customers.json --json --overwrite --connstr "Driver={MySQL ODBC 5.2 ANSI Driver};Server=your_server_name_or_ip;Port=3306;Uid=user1;pwd=SomeP@ssword;Database=your_db_name" |
Export Oracle data to JSON file using ODBC driver: export customers --out c:\data\customers.json --overwrite --connstr "Driver={oracle in oradb10g_home1};uid=scott;pwd=tiger;server=(DESCRIPTION=(ADDRESS=(PROTOCOL=TCP)(HOST=127.0.0.1)(PORT=1521))(CONNECT_DATA=(SERVICE_NAME=ORCL)))" |
Export Oracle data to JSON file using ODBC driver (using a TNS name): export customers --out c:\data\customers.json --overwrite --connstr "Driver={oracle in oradb10g_home1};uid=scott;pwd=tiger;server=myTNSalias" |
Export Oracle data to JSON file using OLEDB driver: export customers --out c:\data\customers.json --overwrite --connstr "Provider=OraOLEDB.Oracle;User ID=scott;password=tiger;Data Source=XE" |
Export table to JSON file (more complex JSON using a map file. This example shows how to join multiple tables, export arrays, and export nested documents. Refer to the map file example earlier in this help): export --map c:\data\MySampleMap.xml -o c:\data\customers.json --connstr "Data Source=localhost;Initial Catalog=Northwind;Integrated Security=SSPI;" -y |
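A hedged example of the -n|--credname option from the parameter table: it assumes a credential entry named MyNorthwind was saved earlier with the setauth command and that it can be used in place of an inline --connstr value:

export customers -n MyNorthwind -o c:\data\cust.csv -y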