# Streaming-upload example for the Redivis Python client:
# create an upload on a table and stream rows into it.
# NOTE(review): reconstructed from a garbled fragment — the schema/rows list
# assignments and the upload.create(...) call structure were missing; keyword
# names below (type, if_not_exists) follow the surviving comments and the
# Redivis client docs. Confirm against the installed client version.
dataset = redivis.user("user_name").dataset("dataset_name", version="next")
table = dataset.table("table_name")

# schema is optional if update_schema is set to True on the insert_rows request
schema = [
    {"name": "var1", "type": "string"},
    {"name": "var2", "type": "integer"},
    {"name": "var3", "type": "dateTime"},
]

rows = [
    {"var1": "hello", "var2": 1, "var3": None},
    # dateTime must be in the format YYYY-MM-DD[ |T]HH:MM:SS[.ssssss]
    {"var1": "world", "var2": 2, "var3": "2020-01-01T00:00:00.123"},
]

# Reference each upload with its name, which must be unique amongst other uploads
# for the current version of this table.
upload = table.upload(name="some_streamed_data")

# Only call create if the upload doesn't already exist
upload.create(
    type="stream",
    # schema is optional if update_schema is set to True on insert_rows
    schema=schema,
    # If True, will only create the upload if an upload with this name doesn't already exist
    # Otherwise, a counter will be added to the name to preserve name uniqueness
    if_not_exists=True,
    # If skip_bad_records is True, ignore records that are incompatible with the existing schema.
    # This has no effect when update_schema is set to True on the insert_rows request.
    skip_bad_records=False,  # Optional, default is False
)

insert_response = upload.insert_rows(
    rows,
    # If update_schema is set to True, variables can be added by subsequent streams,
    # and variable types will be relaxed if new values are incompatible with the previous type.
    # If False, an error will be thrown if a row would cause a schema update,
    # unless skip_bad_records is set to True on the upload (in which case they'll be ignored)
    update_schema=False,
)
# See REST API / uploads / insertRows