7 changes: 7 additions & 0 deletions README.md
@@ -133,6 +133,13 @@ with open('report.xls', 'wb') as f:
0 model-t True Henry Ford [email protected] 2016-02-06 22:28:23.894202
```

**SQL**

``` python
>>> rows.export('sql')
INSERT INTO active_users (username,active,name,user_email,timezone) VALUES ('model-t', TRUE, 'Henry Ford', '[email protected]', TIMESTAMP '2016-02-06 22:28:23.894202');
```

You get the point. All other features of Tablib are also available, so
you can sort results, add/remove columns/rows, remove duplicates,
transpose the table, add separators, slice data by column, and more.
7 changes: 7 additions & 0 deletions README.rst
@@ -135,6 +135,13 @@ Excellent for sharing data with friends, or generating reports.
username active name user_email timezone
0 model-t True Henry Ford [email protected] 2016-02-06 22:28:23.894202

**SQL**

.. code:: python

>>> rows.export('sql')
INSERT INTO active_users (username,active,name,user_email,timezone) VALUES ('model-t', TRUE, 'Henry Ford', '[email protected]', TIMESTAMP '2016-02-06 22:28:23.894202');

You get the point. All other features of Tablib are also available,
so you can sort results, add/remove columns/rows, remove duplicates,
transpose the table, add separators, slice data by column, and more.
40 changes: 36 additions & 4 deletions records.py
@@ -99,8 +99,18 @@ def dataset(self):

return data

@property
def _typed_dataset(self):
"""A Tablib Dataset containing the row with original types."""
data = tablib.Dataset()
data.headers = self.keys()
data.append(self.values())
return data

def export(self, format, **kwargs):
"""Exports the row to the given format."""
if format == 'sql':
return self._typed_dataset.export(format, **kwargs)
return self.dataset.export(format, **kwargs)
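
The `dataset` / `_typed_dataset` split is the heart of the change: the SQL exporter needs the row's original Python values (booleans, datetimes) to emit typed literals such as `TRUE` and `TIMESTAMP '...'`, while the plain `dataset` property presumably keeps coercing values to strings for text-oriented formats. A rough standalone illustration of that type-to-literal mapping (a hypothetical helper, not Tablib's actual SQL exporter):

``` python
from datetime import datetime

def sql_literal(value):
    """Illustrative only: render a Python value as a SQL literal,
    mirroring the style of the INSERT statement shown in the README."""
    if isinstance(value, bool):          # check bool before int (bool is an int subclass)
        return 'TRUE' if value else 'FALSE'
    if isinstance(value, datetime):
        return "TIMESTAMP '{}'".format(value)
    if value is None:
        return 'NULL'
    if isinstance(value, (int, float)):
        return str(value)
    return "'{}'".format(str(value).replace("'", "''"))  # quote and escape strings

row = ('model-t', True, datetime(2016, 2, 6, 22, 28, 23, 894202))
print(', '.join(sql_literal(v) for v in row))
# 'model-t', TRUE, TIMESTAMP '2016-02-06 22:28:23.894202'
```

If the values had already been flattened to strings, everything would fall through to the last branch and the type information would be lost, which is why the SQL path gets its own dataset.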


@@ -167,10 +177,6 @@ def __getitem__(self, key):
def __len__(self):
return len(self._all_rows)

def export(self, format, **kwargs):
"""Export the RecordCollection to a given format (courtesy of Tablib)."""
return self.dataset.export(format, **kwargs)

@property
def dataset(self):
"""A Tablib Dataset representation of the RecordCollection."""
@@ -192,6 +198,32 @@ def dataset(self):

return data

@property
def _typed_dataset(self):
"""A Tablib Dataset representation of the RecordCollection with original types."""
# Create a new Tablib Dataset.
data = tablib.Dataset()

# If the RecordCollection is empty, just return the empty Dataset.
# (list() consumes the lazy iterator, so every row is fetched before counting.)
if len(list(self)) == 0:
return data

# Set the column names as headers on Tablib Dataset.
first = self[0]

data.headers = first.keys()
for row in self.all():
data.append(row.values())

return data

def export(self, format, **kwargs):
"""Export the RecordCollection to a given format (courtesy of Tablib)."""
if format == 'sql':
return self._typed_dataset.export(format, **kwargs)
return self.dataset.export(format, **kwargs)

def all(self, as_dict=False, as_ordereddict=False):
"""Returns a list of all rows for the RecordCollection. If they haven't
been fetched yet, consume the iterator and cache the results."""
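
For context, a minimal usage sketch of the new export path, assuming this branch is installed and pointed at a throwaway SQLite database (the connection URL and table name below are made up):

``` python
import records  # assumes the records package from this branch

db = records.Database('sqlite:///example.db')   # hypothetical connection URL
rows = db.query('SELECT * FROM active_users')   # hypothetical table

# 'sql' is routed through the typed dataset, so values that come back as
# booleans or datetimes are rendered as TRUE/FALSE and TIMESTAMP literals:
print(rows.export('sql'))

# every other format still goes through the stringified dataset, as before:
print(rows.export('csv'))
```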