
Merge branch 'hotfix/4.0.1'

roji committed Jul 1, 2018
2 parents 640341e + 4f87a82 commit c442c87e125ed8c372fc4425661bc1027d23cf6b
@@ -1,5 +1,5 @@
 image: Visual Studio 2017
-version: 4.0.0-{build}
+version: 4.0.1-{build}
 environment:
   global:
     DOTNET_SKIP_FIRST_TIME_EXPERIENCE: true
@@ -2,7 +2,7 @@ language: csharp
 dist: trusty
 sudo: false
 mono: none
-dotnet: 2.1.3
+dotnet: 2.1.300
 services:
   - postgresql
@@ -19,10 +19,10 @@ before_script:
   - psql -U postgres -c "CREATE EXTENSION postgis" npgsql_tests
   - dotnet restore -v Minimal Npgsql.sln
 script:
-  - dotnet build "test/Npgsql.Tests" -c Debug -f netcoreapp2.0
-  - dotnet build "test/Npgsql.PluginTests" -c Debug -f netcoreapp2.0
-  - dotnet test "test/Npgsql.Tests/Npgsql.Tests.csproj" -c Debug -f netcoreapp2.0
-  - dotnet test "test/Npgsql.PluginTests/Npgsql.PluginTests.csproj" -c Debug -f netcoreapp2.0
+  - dotnet build "test/Npgsql.Tests" -c Debug -f netcoreapp2.1
+  - dotnet build "test/Npgsql.PluginTests" -c Debug -f netcoreapp2.1
+  - dotnet test "test/Npgsql.Tests/Npgsql.Tests.csproj" -c Debug -f netcoreapp2.1
+  - dotnet test "test/Npgsql.PluginTests/Npgsql.PluginTests.csproj" -c Debug -f netcoreapp2.1
 cache:
   directories:
@@ -62,7 +62,7 @@ internal ParseMessage Populate(string sql, string statementName, List<NpgsqlPara
     Populate(sql, statementName);

     foreach (var inputParam in inputParameters)
     {
-        inputParam.ResolveHandler(typeMapper);
+        Debug.Assert(inputParam.Handler != null, "Input parameter doesn't have a resolved handler when populating Parse message");
         ParameterTypeOIDs.Add(inputParam.Handler.PostgresType.OID);
     }
     return this;
@@ -6,7 +6,7 @@
     <Copyright>Copyright 2018 © The Npgsql Development Team</Copyright>
     <Company>Npgsql</Company>
     <PackageTags>npgsql postgresql postgres ado ado.net database sql</PackageTags>
-    <VersionPrefix>4.0.0</VersionPrefix>
+    <VersionPrefix>4.0.1</VersionPrefix>
     <LangVersion>latest</LangVersion>
     <TargetFrameworks>net45;net451;netstandard2.0</TargetFrameworks>
     <TreatWarningsAsErrors>true</TreatWarningsAsErrors>
@@ -399,7 +399,7 @@ ELSE pg_proc.proargmodes
     END AS proargmodes
 FROM pg_proc
 LEFT JOIN pg_type ON pg_proc.prorettype = pg_type.oid
-LEFT JOIN pg_attribute ON pg_type.typrelid = pg_attribute.attrelid AND pg_attribute.attnum >= 1
+LEFT JOIN pg_attribute ON pg_type.typrelid = pg_attribute.attrelid AND pg_attribute.attnum >= 1 AND NOT pg_attribute.attisdropped
 WHERE pg_proc.oid = :proname::regproc
 GROUP BY pg_proc.proargnames, pg_proc.proargtypes, pg_proc.proallargtypes, pg_proc.proargmodes, pg_proc.pronargs;
 ";
@@ -594,9 +594,7 @@ Task Prepare(bool async)
 {
     var connector = CheckReadyAndGetConnector();
     for (var i = 0; i < Parameters.Count; i++)
-        if (!Parameters[i].IsTypeExplicitlySet)
-            throw new InvalidOperationException(
-                "The Prepare method requires all parameters to have an explicitly set type.");
+        Parameters[i].Bind(connector.TypeMapper);

     ProcessRawQuery();
     Log.Debug($"Preparing: {CommandText}", connector.Id);
@@ -234,11 +234,15 @@ Task Open(bool async, CancellationToken cancellationToken)
     Counters.SoftConnectsPerSecond.Increment();

-    // Since this pooled connector was opened, global mappings may have
-    // changed. Bring this up to date if needed.
+    // Since this pooled connector was opened, types may have been added (and ReloadTypes() called),
+    // or global mappings may have changed. Bring this up to date if needed.
     var mapper = Connector.TypeMapper;
     if (mapper.ChangeCounter != TypeMapping.GlobalTypeMapper.Instance.ChangeCounter)
+    {
+        // We always do this synchronously which isn't amazing but not very important
+        Connector.LoadDatabaseInfo(NpgsqlTimeout.Infinite, false).GetAwaiter().GetResult();
         mapper.Reset();
+    }

     Debug.Assert(Connector.Connection != null, "Open done but connector not set on Connection");
     Log.Debug("Connection opened", Connector.Id);
@@ -301,11 +305,14 @@ async Task OpenLong()
     else // No enlist
         Connector = await _pool.AllocateLong(this, timeout, async, cancellationToken);

-    // Since this pooled connector was opened, global mappings may have
-    // changed. Bring this up to date if needed.
+    // Since this pooled connector was opened, types may have been added (and ReloadTypes() called),
+    // or global mappings may have changed. Bring this up to date if needed.
     mapper = Connector.TypeMapper;
     if (mapper.ChangeCounter != TypeMapping.GlobalTypeMapper.Instance.ChangeCounter)
+    {
+        await Connector.LoadDatabaseInfo(NpgsqlTimeout.Infinite, async);
         mapper.Reset();
+    }
 }

 // We may have gotten an already enlisted pending connector above, no need to enlist in that case
@@ -321,7 +328,6 @@ async Task OpenLong()
Log.Debug("Connection opened", Connector.Id);
OnStateChange(ClosedToOpenEventArgs);
}
}
#endregion Open / Init
@@ -1420,14 +1426,17 @@ public void UnprepareAll()
 }

 /// <summary>
-/// Flushes the type cache for this connection's connection string and reloads the
-/// types for this connection only.
+/// Flushes the type cache for this connection's connection string and reloads the types for this connection only.
+/// Type changes will appear for other connections only after they are re-opened from the pool.
 /// </summary>
 public void ReloadTypes()
 {
     var conn = CheckReadyAndGetConnector();
     NpgsqlDatabaseInfo.Cache.TryRemove(_connectionString, out var _);
     conn.LoadDatabaseInfo(NpgsqlTimeout.Infinite, false).GetAwaiter().GetResult();
+    // Increment the change counter on the global type mapper. This will make conn.Open() pick up the
+    // new DatabaseInfo and set up a new connection type mapper
+    TypeMapping.GlobalTypeMapper.Instance.RecordChange();
 }

 #endregion Misc
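
Together with the Open() changes above, the RecordChange() call is what lets other pooled connections notice a reload. A hedged usage sketch (the Mood enum and the PostgreSQL type name are illustrative; conn is an open NpgsqlConnection):

    // Create a new database type, then refresh this connection's type information.
    using (var cmd = new NpgsqlCommand("CREATE TYPE mood AS ENUM ('sad', 'ok', 'happy')", conn))
        cmd.ExecuteNonQuery();
    conn.ReloadTypes();
    NpgsqlConnection.GlobalTypeMapper.MapEnum<Mood>("mood");

    // Connections opened from the pool after this point see the bumped change counter,
    // reload their DatabaseInfo and pick up the new type as well.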
@@ -119,7 +119,7 @@ public abstract class NpgsqlDataReader : DbDataReader
 /// A stream that has been opened on a column.
 /// </summary>
 [CanBeNull]
-protected Stream ColumnStream;
+private protected NpgsqlReadBuffer.ColumnStream ColumnStream;

 /// <summary>
 /// Used for internal temporary purposes
@@ -134,7 +134,7 @@ internal NpgsqlDataReader(NpgsqlConnector connector)
     Connector = connector;
 }

-internal virtual void Init(NpgsqlCommand command, CommandBehavior behavior, List<NpgsqlStatement> statements, Task sendTask)
+internal void Init(NpgsqlCommand command, CommandBehavior behavior, List<NpgsqlStatement> statements, Task sendTask)
 {
     Command = command;
     Debug.Assert(command.Connection == Connector.Connection);
@@ -1009,7 +1009,7 @@ ValueTask<Stream> GetStream(int ordinal, bool async)
 ValueTask<Stream> GetStreamInternal(int ordinal, bool async)
 {
-    if (ColumnStream != null)
+    if (ColumnStream != null && !ColumnStream.IsDisposed)
         throw new InvalidOperationException("A stream is already open for this reader");

     var t = SeekToColumn(ordinal, async);
@@ -1019,15 +1019,15 @@ ValueTask<Stream> GetStreamInternal(int ordinal, bool async)
     if (ColumnLen == -1)
         throw new InvalidCastException("Column is null");
     PosInColumn += ColumnLen;
-    return new ValueTask<Stream>(ColumnStream = Buffer.GetStream(ColumnLen, !IsSequential));
+    return new ValueTask<Stream>(ColumnStream = (NpgsqlReadBuffer.ColumnStream)Buffer.GetStream(ColumnLen, !IsSequential));

     async Task<Stream> GetStreamLong(Task seekTask)
     {
         await seekTask;
         if (ColumnLen == -1)
             throw new InvalidCastException("Column is null");
         PosInColumn += ColumnLen;
-        return ColumnStream = Buffer.GetStream(ColumnLen, !IsSequential);
+        return ColumnStream = (NpgsqlReadBuffer.ColumnStream)Buffer.GetStream(ColumnLen, !IsSequential);
     }
 }
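
With the IsDisposed check above, a column stream that has already been disposed no longer blocks opening a stream on another column. A hedged sketch (the two-column layout is illustrative; cmd is an NpgsqlCommand selecting two bytea columns):

    using (var reader = cmd.ExecuteReader())
    {
        while (reader.Read())
        {
            using (var first = reader.GetStream(0))
            {
                // consume the first column
            }
            // Previously this could throw "A stream is already open for this reader"
            // even though the first stream had already been disposed.
            using (var second = reader.GetStream(1))
            {
                // consume the second column
            }
        }
    }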
@@ -35,58 +35,52 @@ internal override ValueTask<IBackendMessage> ReadMessage(bool async)
 protected override Task<bool> NextResult(bool async, bool isConsuming=false)
 {
-    var task = base.NextResult(async, isConsuming);
-    if (Command.Parameters.HasOutputParameters && StatementIndex == 0)
-    {
-        // Populate the output parameters from the first row of the first resultset
-        return task.ContinueWith((t, o) =>
-        {
-            if (HasRows)
-                PopulateOutputParameters();
-            return t.Result;
-        }, null);
-    }
-    return task;
-}
-
-/// <summary>
-/// The first row in a stored procedure command that has output parameters needs to be traversed twice -
-/// once for populating the output parameters and once for the actual result set traversal. So in this
-/// case we can't be sequential.
-/// </summary>
-void PopulateOutputParameters()
-{
-    Debug.Assert(Command.Parameters.Any(p => p.IsOutputDirection));
-    Debug.Assert(StatementIndex == 0);
-    Debug.Assert(RowDescription != null);
-    Debug.Assert(State == ReaderState.BeforeResult);
-
-    // Temporarily set our state to InResult to allow us to read the values
-    State = ReaderState.InResult;
-
-    var pending = new Queue<NpgsqlParameter>();
-    var taken = new List<int>();
-    foreach (var p in Command.Parameters.Where(p => p.IsOutputDirection))
-    {
-        if (RowDescription.TryGetFieldIndex(p.TrimmedName, out var idx))
-        {
-            // TODO: Provider-specific check?
-            p.Value = GetValue(idx);
-            taken.Add(idx);
-        }
-        else
-            pending.Enqueue(p);
-    }
-
-    for (var i = 0; pending.Count != 0 && i != RowDescription.NumFields; ++i)
-    {
-        // TODO: Need to get the provider-specific value based on the out param's type
-        if (!taken.Contains(i))
-            pending.Dequeue().Value = GetValue(i);
-    }
-
-    State = ReaderState.BeforeResult; // Set the state back
-}
+    return Command.Parameters.HasOutputParameters && StatementIndex == -1
+        ? NextResultWithOutputParams()
+        : base.NextResult(async, isConsuming);
+
+    async Task<bool> NextResultWithOutputParams()
+    {
+        var hasResultSet = await base.NextResult(async, isConsuming);
+        if (!hasResultSet || !HasRows)
+            return hasResultSet;
+
+        // The first row in a stored procedure command that has output parameters needs to be traversed twice -
+        // once for populating the output parameters and once for the actual result set traversal. So in this
+        // case we can't be sequential.
+        Debug.Assert(Command.Parameters.Any(p => p.IsOutputDirection));
+        Debug.Assert(StatementIndex == 0);
+        Debug.Assert(RowDescription != null);
+        Debug.Assert(State == ReaderState.BeforeResult);
+
+        // Temporarily set our state to InResult to allow us to read the values
+        State = ReaderState.InResult;
+
+        var pending = new Queue<NpgsqlParameter>();
+        var taken = new List<int>();
+        foreach (var p in Command.Parameters.Where(p => p.IsOutputDirection))
+        {
+            if (RowDescription.TryGetFieldIndex(p.TrimmedName, out var idx))
+            {
+                // TODO: Provider-specific check?
+                p.Value = GetValue(idx);
+                taken.Add(idx);
+            }
+            else
+                pending.Enqueue(p);
+        }
+
+        for (var i = 0; pending.Count != 0 && i != RowDescription.NumFields; ++i)
+        {
+            // TODO: Need to get the provider-specific value based on the out param's type
+            if (!taken.Contains(i))
+                pending.Dequeue().Value = GetValue(i);
+        }
+
+        State = ReaderState.BeforeResult; // Set the state back
+        return hasResultSet;
+    }
+}
[MethodImpl(MethodImplOptions.AggressiveInlining)]
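
A hedged usage sketch of the behavior this rework preserves (the function and parameter names are illustrative; conn is an open NpgsqlConnection): output parameters are populated from the first row of the first result set.

    using (var cmd = new NpgsqlCommand("get_stats", conn) { CommandType = CommandType.StoredProcedure })
    {
        cmd.Parameters.Add(new NpgsqlParameter("total", NpgsqlDbType.Integer)
        {
            Direction = ParameterDirection.Output
        });
        cmd.ExecuteNonQuery();
        Console.WriteLine(cmd.Parameters["total"].Value); // populated from the first result row
    }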
@@ -31,12 +31,12 @@ namespace Npgsql
 {
     public sealed partial class NpgsqlReadBuffer
     {
-        sealed class ColumnStream : Stream
+        internal sealed class ColumnStream : Stream
         {
             readonly NpgsqlReadBuffer _buf;
             int _start, _len, _read;
             bool _canSeek;
-            bool _disposed;
+            internal bool IsDisposed { get; private set; }

             internal ColumnStream(NpgsqlReadBuffer buf)
                 => _buf = buf;
@@ -49,7 +49,7 @@ internal void Init(int len, bool canSeek)
     _len = len;
     _read = 0;
     _canSeek = canSeek;
-    _disposed = false;
+    IsDisposed = false;
 }

 public override bool CanRead => true;
@@ -188,19 +188,19 @@ public override Task WriteAsync(byte[] buffer, int offset, int count, Cancellati
 void CheckDisposed()
 {
-    if (_disposed)
+    if (IsDisposed)
         throw new ObjectDisposedException(null);
 }

 protected override void Dispose(bool disposing)
 {
-    if (_disposed)
+    if (IsDisposed)
         return;

     var leftToSkip = _len - _read;
     if (leftToSkip > 0)
         _buf.Skip(leftToSkip, false).GetAwaiter().GetResult();
-    _disposed = true;
+    IsDisposed = true;
 }
 }
 }
@@ -39,20 +39,16 @@ sealed class NpgsqlSequentialDataReader : NpgsqlDataReader
 internal NpgsqlSequentialDataReader(NpgsqlConnector connector)
     : base(connector) {}

-internal override void Init(NpgsqlCommand command, CommandBehavior behavior, List<NpgsqlStatement> statements, Task sendTask)
-{
-    base.Init(command, behavior, statements, sendTask);
-    Debug.Assert(!command.Parameters.HasOutputParameters);
-    // In sequential reading mode we always use the connector's buffer, unlike in non-sequential
-    // where an oversize buffer may be allocated for a big DataRow
-    Buffer = Connector.ReadBuffer;
-}

 internal override ValueTask<IBackendMessage> ReadMessage(bool async)
     => Connector.ReadMessage(async, DataRowLoadingMode.Sequential);

 internal override void ProcessDataMessage(DataRowMessage dataMsg)
 {
+    // When reading sequentially, we never allocate oversize buffers for data rows since they don't have
+    // to fit in the buffer (that's the point of sequential). However, if the row description message is bigger
+    // than the buffer, an oversize buffer will be allocated (see #2003). This is hacky and needs to be redone.
+    Buffer = Connector.ReadBuffer;
     _column = -1;
     ColumnLen = -1;
     PosInColumn = 0;
@@ -367,8 +367,9 @@ public void WriteString(string s, int len = 0)
 internal void WriteChars(char[] chars, int offset, int len)
 {
-    Debug.Assert(TextEncoding.GetByteCount(chars) <= WriteSpaceLeft);
-    WritePosition += TextEncoding.GetBytes(chars, offset, len == 0 ? chars.Length : len, Buffer, WritePosition);
+    var charCount = len == 0 ? chars.Length : len;
+    Debug.Assert(TextEncoding.GetByteCount(chars, 0, charCount) <= WriteSpaceLeft);
+    WritePosition += TextEncoding.GetBytes(chars, offset, charCount, Buffer, WritePosition);
 }

 public void WriteBytes(byte[] buf) => WriteBytes(buf, 0, buf.Length);
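
The corrected assertion above measures only the characters actually being written; counting the whole array can overstate the byte count when just a prefix is written. A small standalone illustration (not Npgsql code):

    var chars = new char[8192];
    chars[0] = 'é';                           // a multi-byte character in UTF-8
    var charCount = 1;                        // only one char is actually written

    var enc = System.Text.Encoding.UTF8;
    Console.WriteLine(enc.GetByteCount(chars));               // counts every slot in the array
    Console.WriteLine(enc.GetByteCount(chars, 0, charCount)); // counts only the written prefix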