SQL - SQL Server Storage


-- If you execute the following CREATE TABLE statement, whose column definitions add up to 
-- exactly 8,060 bytes, the statement fails: the few bytes of internal row overhead push the 
-- minimum row size past the 8,060-byte limit, producing the error message shown here:



USE test;
CREATE TABLE dbo.bigrows_fixed  
(   a char(3000),  
    b char(3000), 
    c char(2000), 
    d char(60) ) ;




-- The following code creates a table with rows that have a maximum defined length that is much longer 
-- than 8,060 bytes:


USE test;
CREATE TABLE dbo.bigrows  
  (a varchar(3000),  
   b varchar(3000), 
   c varchar(3000), 
   d varchar(3000) );




-- With SQL Server 2005 and SQL Server 2008, not only can the preceding dbo.bigrows table be created, 
-- but you can insert a row with column sizes that add up to more than 8,060 bytes with a 
-- simple INSERT, as shown here:


INSERT INTO dbo.bigrows 
     SELECT REPLICATE('e', 2100), REPLICATE('f', 2100), 
      REPLICATE('g', 2100),  REPLICATE('h', 2100);


-- To determine whether SQL Server is storing any data in row-overflow data pages for a particular table, 
-- you can run the following allocation query from Chapter 5: 


SELECT object_name(object_id) AS name,  
    partition_id, partition_number AS pnum,  rows,  
    allocation_unit_id AS au_id, type_desc as page_type_desc, 
    total_pages AS pages 
FROM sys.partitions p  JOIN sys.allocation_units a 
   ON p.partition_id = a.container_id 
WHERE object_id=object_id('dbo.bigrows');
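

-- The sp_tablepages table used below to capture DBCC IND output was created in an
-- earlier chapter. If you don't have it, the following is a sketch of a compatible
-- definition, assuming the standard DBCC IND result columns:


CREATE TABLE sp_tablepages
(PageFID         tinyint,
 PagePID         int,
 IAMFID          tinyint,
 IAMPID          int,
 ObjectID        int,
 IndexID         tinyint,
 PartitionNumber tinyint,
 PartitionID     bigint,
 iam_chain_type  varchar(30),
 PageType        tinyint,
 IndexLevel      tinyint,
 NextPageFID     tinyint,
 NextPagePID     int,
 PrevPageFID     tinyint,
 PrevPagePID     int,
 PRIMARY KEY (PageFID, PagePID));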




-- To populate the sp_tablepages table with the information about the bigrows table, I can run 
-- the following INSERT statement:


INSERT INTO sp_tablepages 
    EXEC ('DBCC IND (test, bigrows, -1)')




-- Once the table is populated, you can select only the columns of interest as follows:


SELECT PageFID, PagePID, ObjectID, PartitionID, IAM_chain_type, PageType 
FROM sp_tablepages;




-- For example, you can create the table dbo.hugerows and insert a single row into it as follows:


CREATE TABLE dbo.hugerows  
  (a varchar(3000),  
   b varchar(8000), 
   c varchar(8000), 
   d varchar(8000)); 

INSERT INTO dbo.hugerows 
     SELECT REPLICATE('a', 3000), REPLICATE('b', 8000), 
         REPLICATE('c', 8000),  REPLICATE('d', 8000);




-- The following update reduces the size of the first column by 500 bytes, reducing the row size 
-- to 7,900 bytes, which should all fit on the one data page: 


UPDATE bigrows 
SET a = replicate('a', 1600);




-- Now reduce the size of the first column by more than 1,000 bytes and run the allocation query once more: 


UPDATE bigrows 
SET a = 'aaaaa';
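

-- (Repeating the allocation query from above; after this update, you can check whether
-- the ROW_OVERFLOW_DATA allocation unit still holds pages:)


SELECT object_name(object_id) AS name,  
    partition_id, partition_number AS pnum,  rows,  
    allocation_unit_id AS au_id, type_desc as page_type_desc, 
    total_pages AS pages 
FROM sys.partitions p  JOIN sys.allocation_units a 
   ON p.partition_id = a.container_id 
WHERE object_id=object_id('dbo.bigrows');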




-- The following script uses the sp_tablepages table created previously:


IF EXISTS (SELECT * FROM sys.tables
WHERE name = 'textdata')
    DROP TABLE textdata;
GO
CREATE TABLE textdata
 (bigcol text);
GO
INSERT INTO textdata
   SELECT REPLICATE(convert(varchar(MAX), 'a'), 38000);
GO
TRUNCATE TABLE sp_tablepages;
GO
INSERT INTO sp_tablepages
  EXEC('DBCC IND(test, textdata, -1)');
GO
SELECT PageFID, PagePID, ObjectID, IAM_chain_type, PageType
FROM sp_tablepages;
GO
INSERT INTO textdata
   SELECT REPLICATE(convert(varchar(MAX), 'a'), 41000);
GO
TRUNCATE TABLE sp_tablepages;
GO
INSERT INTO sp_tablepages
  EXEC('DBCC IND(test, textdata, -1)');
GO
SELECT PageFID, PagePID, ObjectID, IAM_chain_type, PageType
FROM sp_tablepages;




-- The following command enables up to 500 bytes of LOB data to be stored with the regular row data
-- in a table called employee:


EXEC sp_tableoption employee, 'text in row', 500;




-- To determine whether a table has the text in row property enabled, you can inspect the sys.tables 
-- catalog view as follows:


SELECT name, text_in_row_limit
FROM sys.tables
WHERE name = 'employee';




-- Let’s create a table very similar to the one we created to look at row structures, but we’ll change 
-- the varchar(250) column to the text data type. We’ll use almost the same insert statement to insert
-- one row into the table:


CREATE TABLE HasText  
(
Col1 char(3)       NOT NULL, 
Col2 varchar(5)    NOT NULL, 
Col3 text          NOT NULL, 
Col4 varchar(20)   NOT NULL 
); 

INSERT HasText VALUES  
    ('AAA', 'BBB', REPLICATE('X', 250), 'CCC');




-- Now let’s find the basic information for this table using the allocation query and also look at the 
-- DBCC IND values for this table:


SELECT convert(char(7), object_name(object_id))  AS name,
     partition_id, partition_number AS pnum,  rows,
     allocation_unit_id AS au_id, convert(char(17), type_desc) as page_type_desc,
    total_pages AS pages
FROM sys.partitions p  JOIN sys.allocation_units a
   ON p.partition_id = a.container_id
WHERE object_id=object_id('dbo.HasText');


DBCC IND (test, HasText, -1);




-- Now let’s enable text data in the row, for up to 500 bytes:


EXEC sp_tableoption HasText, 'text in row', 500;




-- Enabling this option does not force the text data to be moved into the row. We have to update 
-- the text value to actually force the data movement:


UPDATE HasText  
SET col3 =  REPLICATE('Z', 250);
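

-- (To verify that the LOB data moved into the row, you can rerun the same DBCC IND
-- command used above and compare the set of pages:)


DBCC IND (test, HasText, -1);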




-- If you change the maximum length for text in row to 50 for the HasText table we’ve been working with,
-- this also forces the LOB data for all rows with more than 50 bytes of LOB data to be moved off the 
-- page immediately, just as when you disable the option completely:


EXEC sp_tableoption HasText, 'text in row', 50;




-- As with all configuration options, don't forget to run the RECONFIGURE command after 
-- changing a setting. Here, the filestream access level is set to 1 to enable T-SQL access 
-- to FILESTREAM data:


EXEC sp_configure 'filestream access level', 1;
RECONFIGURE;
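

-- (Not shown in the original text: the documented FILESTREAM arguments to SERVERPROPERTY
-- let you confirm the configured and effective access levels:)


SELECT SERVERPROPERTY('FilestreamConfiguredLevel') AS ConfiguredLevel,
       SERVERPROPERTY('FilestreamEffectiveLevel') AS EffectiveLevel;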




-- The path C:\Data2 must exist, but it must not contain either the filestream1 or the filestream2 folders: 


CREATE DATABASE MyFilestreamDB 
ON
PRIMARY ( NAME = Rowdata1,
    FILENAME = 'c:\Data2\Rowdata1.mdf'),
FILEGROUP FileStreamGroup1 CONTAINS FILESTREAM DEFAULT( NAME = FSData1,
    FILENAME = 'c:\Data2\filestream1'),
FILEGROUP FileStreamGroup2 CONTAINS FILESTREAM ( NAME = FSData2,
    FILENAME = 'c:\Data2\filestream2')
LOG ON  ( NAME = FSDBLOG,
    FILENAME = 'c:\Data2\FSDB_log.ldf');
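

-- (A quick sanity check, not in the original text: sys.database_files lists the FILESTREAM
-- containers alongside the row data and log files, with type_desc = 'FILESTREAM':)


USE MyFilestreamDB;
SELECT name, type_desc, physical_name
FROM sys.database_files;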




-- When created in the MyFilestreamDB database, the table here adds several folders to the
-- FileStreamGroup1 container: 


CREATE TABLE MyFilestreamDB.dbo.Records
(
[Id] [uniqueidentifier] ROWGUIDCOL NOT NULL UNIQUE, 
[SerialNumber] INTEGER UNIQUE,
[Chart_Primary] VARBINARY(MAX) FILESTREAM NULL,
[Chart_Secondary] VARBINARY(MAX) FILESTREAM NULL)
FILESTREAM_ON FileStreamGroup1;




-- Replicating the 9-byte string 10,000 times results in a 90,000-byte string, which is then converted 
-- to varbinary(MAX):


USE MyFilestreamDB;
INSERT INTO dbo.Records
    SELECT newid (), 24, 
      CAST (REPLICATE (CONVERT(varchar(MAX), 'Base Data'), 10000) 
AS varbinary(max)),
      0x;
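

-- (To confirm the stored size, DATALENGTH reports the byte count of the varbinary(MAX)
-- column; the row just inserted should report 90,000 bytes for Chart_Primary:)


SELECT SerialNumber, DATALENGTH(Chart_Primary) AS byte_count
FROM dbo.Records;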




-- If you inspect the filestream1 container, you see that the folder for the Chart_Primary 
-- column contains three files, one for each of the three rows:
USE master;
GO
DROP DATABASE MyFilestreamDB;
GO
CREATE DATABASE  MyFilestreamDB  ON  PRIMARY 
     (NAME = N'Rowdata1', FILENAME = N'c:\data\Rowdata1.mdf' , SIZE = 2304KB , 
      MAXSIZE = UNLIMITED, FILEGROWTH = 1024KB ), 
 FILEGROUP  FileStreamGroup1  CONTAINS FILESTREAM  DEFAULT 
    (NAME = N'FSData1', FILENAME = N'c:\data\filestream1' ), 
 FILEGROUP  FileStreamGroup2  CONTAINS FILESTREAM 
    (NAME = N'FSData2', FILENAME = N'c:\data\filestream2' )
 LOG ON 
    (NAME = N'FSDBLOG', FILENAME = N'c:\data\FSDB_log.ldf' , SIZE = 1024KB , 
     MAXSIZE = 2048GB , FILEGROWTH = 10%);
GO
USE MyFilestreamDB;
GO
CREATE TABLE dbo.Records
(
Id [uniqueidentifier] ROWGUIDCOL NOT NULL UNIQUE, 
SerialNumber INTEGER UNIQUE,
Chart_Primary VARBINARY(MAX) FILESTREAM NULL 
)
FILESTREAM_ON FileStreamGroup1;
GO
INSERT INTO dbo.Records
    VALUES (newid(), 1, 
              CAST (REPLICATE (CONVERT(varchar(MAX), 'Base Data'), 
                      10000) as varbinary(max))),
           (newid(), 2, 
               CAST (REPLICATE (CONVERT(varchar(MAX), 'New Data'), 
                      10000) as   varbinary(max))),
  (newid(), 3, 0x);
GO
BACKUP DATABASE MyFilestreamDB TO DISK = 'C:\backups\FBDB.bak';
GO




-- Now delete one of the rows, as follows:


DELETE dbo.Records
WHERE SerialNumber = 2;
GO




-- However, during testing, I’m not putting much into the log at all, so I have to force the CHECKPOINT:


BACKUP LOG MyFilestreamDB TO DISK = 'C:\backups\FBDB_log.bak';
CHECKPOINT;




-- We need to back up the log and then force another CHECKPOINT:


BACKUP LOG MyFilestreamDB TO DISK = 'C:\backups\FBDB_log.bak';
CHECKPOINT;




-- So the following code returns a UNC name for the file containing the actual column’s data in 
-- the row I inserted previously:


SELECT Chart_Primary, Chart_Primary.PathName()
FROM dbo.Records
WHERE SerialNumber = 24;
GO
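

-- (For Win32 streaming access, PathName() is normally paired with
-- GET_FILESTREAM_TRANSACTION_CONTEXT(), which returns a non-NULL value only inside
-- a transaction; a minimal sketch:)


BEGIN TRAN;
SELECT Chart_Primary.PathName() AS FilePath,
       GET_FILESTREAM_TRANSACTION_CONTEXT() AS TransactionContext
FROM dbo.Records
WHERE SerialNumber = 24;
COMMIT TRAN;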




-- If you rerun the above script but don't back up the log, the deleted file's tombstone entry 
-- remains, and you can find the internal tombstone table with the following script:


USE MyFilestreamDB;
GO
SELECT name FROM sys.internal_tables
WHERE name like '%tombstone%';


-- I see the table named: filestream_tombstone_2073058421
-- Reconnect using DAC, which puts us in the master database
USE MyFilestreamDB;
GO
SELECT * FROM sys.filestream_tombstone_2073058421;
GO




-- Because a table that allows sparse columns has a smaller maximum row size (8,018 bytes 
-- rather than 8,060), an insert can fail against it even though the same row causes no 
-- problem in an otherwise identical table with no sparse columns:


USE test;
GO
CREATE TABLE test_nosparse
(
  col1 int,
  col2 char(8000),
  col3 varchar(8000)
);
GO
INSERT INTO test_nosparse 
SELECT null, null, null;
INSERT INTO test_nosparse 
SELECT 1, 'a', 'b';
GO




-- These two rows can be inserted with no error. Now, build the second table: 


CREATE TABLE test_sparse
(
 col1 int SPARSE,
 col2 char(8000) SPARSE,
 col3 varchar(8000) SPARSE
);
GO
INSERT INTO test_sparse 
SELECT NULL, NULL, NULL;
INSERT INTO test_sparse 
SELECT 1, 'a', 'b';
GO




-- However, when we try to make one of the columns SPARSE, even a relatively small column like 
-- the 8-byte datetime column, the extra overhead makes the existing rows too large and the ALTER fails:


IF EXISTS (SELECT * FROM sys.tables
WHERE name = 'test_nosparse_alter')
DROP TABLE test_nosparse_alter;
GO
CREATE TABLE test_nosparse_alter
(
c1 int,
c2 char(4020) ,
c3 char(4020) ,
c4 datetime 
);
GO
INSERT INTO test_nosparse_alter SELECT NULL, NULL, NULL, NULL;
INSERT INTO test_nosparse_alter SELECT 1, 1, 'b', GETDATE();
GO
ALTER TABLE test_nosparse_alter
  ALTER COLUMN c4 datetime SPARSE;




-- The following code builds a table containing an identity column, 25 sparse columns, and a column set:
USE test;
GO
IF EXISTS (SELECT * FROM sys.tables
WHERE name = 'lots_of_sparse_columns')
DROP TABLE lots_of_sparse_columns;
GO
CREATE TABLE lots_of_sparse_columns
(ID int IDENTITY,
 col1 int SPARSE,
 col2 int SPARSE, 
 col3 int SPARSE,
 col4 int SPARSE,
 col5 int SPARSE,
 col6 int SPARSE,
 col7 int SPARSE,
 col8 int SPARSE,
 col9 int SPARSE,
 col10 int SPARSE,
 col11 int SPARSE,
 col12 int SPARSE,
 col13 int SPARSE,
 col14 int SPARSE,
 col15 int SPARSE,
 col16 int SPARSE,
 col17 int SPARSE,
 col18 int SPARSE,
 col19 int SPARSE,
 col20 int SPARSE,
 col21 int SPARSE,
 col22 int SPARSE,
 col23 int SPARSE,
 col24 int SPARSE,
 col25 int SPARSE,
 sparse_column_set XML COLUMN_SET FOR ALL_SPARSE_COLUMNS);
GO




-- Next, I insert values into 3 of the 25 columns, specifying individual column names:


INSERT INTO lots_of_sparse_columns (col4, col7, col12)  SELECT 4,6,11;




-- Being able to modify data through the COLUMN_SET is another feature that differentiates 
-- COLUMN_SETs from computed columns; here, a row is inserted by supplying an XML value for 
-- the column set:


INSERT INTO lots_of_sparse_columns (sparse_column_set)
SELECT '<col8>42</col8><col17>0</col17><col22>30000</col22>';




-- Here are the results when I run SELECT * from this table:


SELECT * FROM lots_of_sparse_columns;




-- So the following SELECT statements are both valid:


SELECT ID, col10, col15, col20
    FROM lots_of_sparse_columns;
SELECT *, col11
    FROM lots_of_sparse_columns;




-- First, build a table containing two sparse columns, and populate it with three rows:


USE test;
GO
IF EXISTS (SELECT * FROM sys.tables
WHERE name = 'sparse_bits')
DROP TABLE sparse_bits;
GO
CREATE TABLE sparse_bits
(
c1 int IDENTITY,
c2 varchar(4),
c3 char(4) SPARSE,
c4 varchar(4) SPARSE
);
GO
INSERT INTO sparse_bits SELECT 'aaaa', 'bbbb', 'cccc';
INSERT INTO sparse_bits SELECT 'dddd', null, 'eeee';
INSERT INTO sparse_bits SELECT 'ffff', null, 'gg';
GO




-- Now we can use DBCC IND to find the page number for the data page storing these three rows and 
-- then use DBCC PAGE to look at the bytes on the page:


DBCC IND(test, sparse_bits, -1);
GO
-- The output indicated that the data page for my table was page 289; substitute the page
-- number that DBCC IND returns on your system.
DBCC TRACEON(3604);
DBCC PAGE(test, 1, 289, 1);




-- To see which of these columns are sparse and which is the COLUMN_SET column, I could run 
-- the following query:


SELECT OBJECT_NAME(object_id) as 'Table', name as 'Column', is_sparse, is_column_set
FROM sys.columns
WHERE OBJECT_NAME(object_id) like '%sparse%';




-- If I want to see just the table and column names for all COLUMN_SET columns, I could run the 
-- following query:


SELECT OBJECT_NAME(object_id) as 'Table', name as 'Column'
FROM sys.columns
WHERE COLUMNPROPERTY(object_id, name, 'IsColumnSet') = 1;




-- The following script builds four tables: two with sparse columns and two without. One 
-- table of each pair is populated with rows containing only NULL values, and the other with 
-- rows that are not NULL: 
USE test;
GO
SET NOCOUNT ON;
GO
IF EXISTS (SELECT * FROM sys.tables
WHERE name = 'sparse_nonulls_size')
DROP TABLE sparse_nonulls_size;
GO
CREATE TABLE sparse_nonulls_size
(col1 int IDENTITY,
 col2 datetime SPARSE,
 col3 char(10) SPARSE
 );
GO
IF EXISTS (SELECT * FROM sys.tables
WHERE name = 'nonsparse_nonulls_size')
DROP TABLE nonsparse_nonulls_size;
GO
CREATE TABLE nonsparse_nonulls_size
(col1 int IDENTITY,
 col2 datetime,
 col3 char(10)
 );
GO
IF EXISTS (SELECT * FROM sys.tables
WHERE name = 'sparse_nulls_size')
DROP TABLE sparse_nulls_size;
GO
CREATE TABLE sparse_nulls_size
(col1 int IDENTITY,
 col2 datetime SPARSE,
 col3 char(10) SPARSE
 );
GO
IF EXISTS (SELECT * FROM sys.tables
WHERE name = 'nonsparse_nulls_size')
DROP TABLE nonsparse_nulls_size;
GO
CREATE TABLE nonsparse_nulls_size
(col1 int IDENTITY,
 col2 datetime,
 col3 char(10)
 );
GO
DECLARE @num int;
SET @num = 1;
WHILE @num < 100000
BEGIN
  INSERT INTO sparse_nonulls_size
SELECT GETDATE(), 'my message';
  INSERT INTO nonsparse_nonulls_size
SELECT GETDATE(), 'my message';
  INSERT INTO sparse_nulls_size
SELECT NULL, NULL;
  INSERT INTO nonsparse_nulls_size
SELECT NULL, NULL;
  SET @num = @num + 1;
END;
GO




-- Now look at the number of pages in each table. The following metadata query looks at the 
-- number of data pages in the sys.allocation_units view for each of the four tables:


SELECT object_name(object_id) as 'table with 100K rows', data_pages
FROM sys.allocation_units au 
    JOIN sys.partitions p
       ON p.partition_id = au.container_id
WHERE object_name(object_id) LIKE '%sparse%size'; 




-- A metadata query examining pages allocated to each table is executed against each of the tables
-- so that you can compare the sizes before and after row compression:


USE AdventureWorks;
GO
IF EXISTS (SELECT * FROM sys.tables
        WHERE name = 'Employees_uncompressed')
           DROP TABLE Employees_uncompressed;
GO
SELECT e.BusinessEntityID, NationalIDNumber, JobTitle, 
        BirthDate, MaritalStatus, VacationHours, 
        FirstName, LastName
  INTO Employees_uncompressed
  FROM HumanResources.Employee e
   JOIN Person.Person p
        ON e.BusinessEntityID = p.BusinessEntityID;
GO
UPDATE Employees_uncompressed 
SET NationalIDNumber = '1111', 
        JobTitle = 'Boss',
        LastName = 'Gato'
WHERE FirstName = 'Ken'
AND LastName = 'Sánchez';
GO
ALTER TABLE dbo.Employees_uncompressed
   ADD CONSTRAINT EmployeeUn_ID 
       PRIMARY KEY (BusinessEntityID);
GO
SELECT OBJECT_NAME(object_id) as name,
        rows, data_pages, data_compression_desc
FROM sys.partitions p JOIN sys.allocation_units au
        ON p.partition_id = au.container_id
WHERE object_id = object_id('dbo.Employees_uncompressed');


IF EXISTS (SELECT * FROM sys.tables
        WHERE name = 'Employees_rowcompressed')
            DROP TABLE Employees_rowcompressed;
GO 
SELECT BusinessEntityID, NationalIDNumber, JobTitle, 
        BirthDate, MaritalStatus, VacationHours, 
        FirstName, LastName
  INTO Employees_rowcompressed 
  FROM dbo.Employees_uncompressed
GO
ALTER TABLE dbo.Employees_rowcompressed 
   ADD CONSTRAINT EmployeeR_ID 
       PRIMARY KEY (BusinessEntityID);
GO
ALTER TABLE dbo.Employees_rowcompressed
REBUILD WITH (DATA_COMPRESSION = ROW);
GO
SELECT OBJECT_NAME(object_id) as name,
        rows, data_pages, data_compression_desc
FROM sys.partitions p JOIN sys.allocation_units au
        ON p.partition_id = au.container_id
WHERE object_id = object_id('dbo.Employees_rowcompressed');
GO
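

-- (Not part of the original example: before rebuilding, you can estimate the benefit with
-- the sp_estimate_data_compression_savings procedure introduced in SQL Server 2008:)


EXEC sp_estimate_data_compression_savings
    @schema_name = 'dbo',
    @object_name = 'Employees_uncompressed',
    @index_id = NULL,
    @partition_number = NULL,
    @data_compression = 'ROW';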




-- The code then uses the captured information to report on the number of data pages in each of 
-- the three tables: 


IF EXISTS (SELECT * FROM sys.tables
WHERE name = 'Employees_pagecompressed')
DROP TABLE Employees_pagecompressed;
GO
SELECT BusinessEntityID, NationalIDNumber, JobTitle, 
        BirthDate, MaritalStatus, VacationHours, 
        FirstName, LastName
  INTO Employees_pagecompressed 
  FROM dbo.Employees_uncompressed


GO
ALTER TABLE dbo.Employees_pagecompressed  
   ADD CONSTRAINT EmployeeP_ID 
       PRIMARY KEY (BusinessEntityID);
GO
ALTER TABLE dbo.Employees_pagecompressed 
REBUILD WITH (DATA_COMPRESSION = PAGE);
GO
SELECT OBJECT_NAME(object_id) as name,
rows, data_pages, data_compression_desc
FROM sys.partitions p JOIN sys.allocation_units au
ON p.partition_id = au.container_id
WHERE object_id = object_id('dbo.Employees_pagecompressed');
GO


TRUNCATE TABLE sp_tablepages;
GO
INSERT INTO sp_tablepages
   EXEC ('DBCC IND(AdventureWorks, Employees_pagecompressed, -1)');
INSERT INTO sp_tablepages
   EXEC ('DBCC IND(AdventureWorks, Employees_rowcompressed, -1)');
INSERT INTO sp_tablepages
   EXEC ('DBCC IND(AdventureWorks, Employees_uncompressed, -1)');
GO 
SELECT OBJECT_NAME(ObjectID), count(*) as NumPages
FROM sp_tablepages
WHERE pagetype = 1
GROUP BY OBJECT_NAME(ObjectID);
GO




-- Note that only the page for Employees_pagecompressed has the m_typeFlagBits value set to 0x80: 


USE AdventureWorks;
GO
TRUNCATE TABLE sp_tablepages;
GO
INSERT INTO sp_tablepages
   EXEC ('DBCC IND(AdventureWorks, Employees_pagecompressed, -1)');
GO  
INSERT INTO sp_tablepages   
   EXEC ('DBCC IND(AdventureWorks, Employees_rowcompressed, -1)');
GO
INSERT INTO sp_tablepages
   EXEC ('DBCC IND(AdventureWorks, Employees_uncompressed, -1)');
GO 
SELECT OBJECT_NAME(ObjectID), PageFID, PagePID
FROM sp_tablepages
WHERE pagetype = 1
  AND PrevPagePID = 0;
GO   
DBCC TRACEON(3604);
GO




-- Here is the partition function that we will use for this example:


CREATE PARTITION FUNCTION [TransactionRangePF1] (datetime) 
AS RANGE RIGHT FOR VALUES ('20081001', '20081101', '20081201', 
               '20090101', '20090201', '20090301', '20090401',  
               '20090501', '20090601', '20090701', '20090801');
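

-- (To check which of the twelve partitions a given value maps to, you can query the
-- $PARTITION function; with RANGE RIGHT, a mid-February 2009 date falls in the partition
-- that begins at the '20090201' boundary:)


SELECT $PARTITION.TransactionRangePF1('20090215') AS PartitionNumber;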




-- Here is the partition scheme for my example:


CREATE PARTITION SCHEME [TransactionsPS1] 
AS PARTITION [TransactionRangePF1]  
TO ([PRIMARY], [PRIMARY], [PRIMARY] 
, [PRIMARY], [PRIMARY], [PRIMARY] 
, [PRIMARY], [PRIMARY], [PRIMARY] 
, [PRIMARY], [PRIMARY], [PRIMARY]); 
GO




-- In fact, if you want to have all the partitions on the same filegroup, as I do here,
-- there is a shortcut syntax:


CREATE PARTITION SCHEME [TransactionsPS1] 
AS PARTITION [TransactionRangePF1]  
ALL TO ([PRIMARY]); 
GO




-- The partition function and partition scheme for a second table are shown here:


CREATE PARTITION FUNCTION [TransactionArchivePF2] (datetime)  
AS RANGE RIGHT FOR VALUES ('20080901'); 
GO 

CREATE PARTITION SCHEME [TransactionArchivePS2] 
AS PARTITION [TransactionArchivePF2] 
TO ([PRIMARY], [PRIMARY]); 
GO




-- Alternatively, you can use the following query to determine whether a table is partitioned
-- by replacing Production.TransactionHistoryArchive with the name of the table in which you’re
-- interested:


SELECT DISTINCT object_name(object_id) as TableName,
    ISNULL(ps.name, 'Not partitioned') as PartitionScheme
FROM sys.indexes i LEFT JOIN sys.partition_schemes ps
    ON (i.data_space_id = ps.data_space_id)
WHERE (i.object_id = object_id('Production.TransactionHistoryArchive'))
    AND (i.index_id IN (0,1));




-- When selecting from the view, you can add your own WHERE clause to find information about 
-- just the table you’re interested in: 


CREATE VIEW Partition_Info AS 
SELECT OBJECT_NAME(i.object_id) as Object_Name, 
        p.partition_number, fg.name AS Filegroup_Name, rows, au.total_pages, 
        CASE boundary_value_on_right  
                   WHEN 1 THEN 'less than'  
                   ELSE 'less than or equal to' END as 'comparison', value 
FROM sys.partitions p JOIN sys.indexes i 
     ON p.object_id = i.object_id and p.index_id = i.index_id 
       JOIN sys.partition_schemes ps  
                ON ps.data_space_id = i.data_space_id 
       JOIN sys.partition_functions f  
                   ON f.function_id = ps.function_id 
       LEFT JOIN sys.partition_range_values rv 
                   ON f.function_id = rv.function_id 
                      AND p.partition_number = rv.boundary_id 
       JOIN sys.destination_data_spaces dds 
               ON dds.partition_scheme_id = ps.data_space_id  
                    AND dds.destination_id = p.partition_number 
       JOIN sys.filegroups fg  
                  ON dds.data_space_id = fg.data_space_id 
       JOIN (SELECT container_id, sum(total_pages) as total_pages  
                       FROM sys.allocation_units 
                       GROUP BY container_id) AS au 
                  ON au.container_id = p.partition_id   
  WHERE i.index_id <2;




-- This query uses the preceding view to get information about my TransactionHistory table’s 
-- partitions:


SELECT * FROM Partition_Info  
WHERE Object_Name = 'TransactionHistory';




-- The following query returns information about each allocation unit in the first two partitions of
-- my TransactionHistory and TransactionHistoryArchive tables, including the number of rows, the number
-- of pages, the type of data in the allocation unit, and the page where the allocation unit starts:


SELECT convert(char(25),object_name(object_id)) AS name,  
    rows, convert(char(15),type_desc) as page_type_desc, 
    total_pages AS pages, first_page, index_id, partition_number 
FROM sys.partitions p JOIN sys.system_internals_allocation_units a 
     ON p.partition_id = a.container_id 
WHERE (object_id=object_id('[Production].[TransactionHistory]') 
   OR object_id=object_id('[Production].[TransactionHistoryArchive]')) 
  AND index_id = 1 AND partition_number <= 2;




-- Please refer to SQL Server Books Online for complete details about each of the following commands:


ALTER PARTITION SCHEME TransactionsPS1  
NEXT USED [PRIMARY]; 
GO 

ALTER PARTITION FUNCTION TransactionRangePF1()  
SPLIT RANGE ('20090901'); 
GO
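

-- (The counterpart of SPLIT RANGE is MERGE RANGE, which removes an existing boundary
-- point and combines the two adjacent partitions; a sketch using one of the boundaries
-- defined above:)


ALTER PARTITION FUNCTION TransactionRangePF1()  
MERGE RANGE ('20081001'); 
GO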




-- In this case, I’ll add a new boundary point for October 1, 2008: 


ALTER PARTITION SCHEME TransactionArchivePS2  
NEXT USED [PRIMARY]; 
GO 

ALTER PARTITION FUNCTION TransactionArchivePF2()  
SPLIT RANGE ('20081001'); 
GO




-- After that, you can add the constraint and run the ALTER TABLE / SWITCH command again:


ALTER TABLE [Production].[TransactionHistory]  
ADD CONSTRAINT [CK_TransactionHistory_DateRange]  
CHECK ([TransactionDate] >= '20080901'); 
GO 
ALTER TABLE [Production].[TransactionHistory]  
SWITCH PARTITION 1  
TO [Production].[TransactionHistoryArchive] PARTITION 2; 
GO




-- Now we run the metadata query that examines the size and location of the first two partitions
-- of each table as follows:


SELECT convert(char(25),object_name(object_id)) AS name,  
    rows, convert(char(15),type_desc) as page_type_desc, 
    total_pages AS pages, first_page, index_id, partition_number 
FROM sys.partitions p JOIN sys.system_internals_allocation_units a 
     ON p.partition_id = a.container_id 
WHERE (object_id=object_id('[Production].[TransactionHistory]') 
   OR object_id=object_id('[Production].[TransactionHistoryArchive]')) 
  AND index_id = 1 AND partition_number <= 2;