\documentclass[11pt,twoside]{article}\makeatletter
% Prefer xcolor when installed; fall back to the older color package.
\IfFileExists{xcolor.sty}%
{\RequirePackage{xcolor}}%
{\RequirePackage{color}}
\usepackage{colortbl}
\usepackage{wrapfig}
% Engine split: XeTeX gets fontspec + CJK faces; pdfTeX gets inputenc/ucs.
\usepackage{ifxetex}
\ifxetex
\usepackage{fontspec}
\usepackage{xunicode}
% Make the Unicode combining-backslash and fullwidth-brace glyphs active and
% map them to their ASCII LaTeX equivalents (they occur in TEI-exported text).
\catcode`⃥=\active \def⃥{\textbackslash}
\catcode`❴=\active \def❴{\{}
\catcode`❵=\active \def❵{\}}
\def\textJapanese{\fontspec{Noto Sans CJK JP}}
\def\textChinese{\fontspec{Noto Sans CJK SC}}
\def\textKorean{\fontspec{Noto Sans CJK KR}}
\setmonofont{DejaVu Sans Mono}
\else
% pdfTeX branch: use utf8x (ucs) when available, otherwise plain utf8.
\IfFileExists{utf8x.def}%
{\usepackage[utf8x]{inputenc}
\PrerenderUnicode{–}
}%
{\usepackage[utf8]{inputenc}}
\usepackage[english]{babel}
\usepackage[T1]{fontenc}
\usepackage{float}
\usepackage[]{ucs}
% Map selected Unicode code points to LaTeX replacements under ucs
% (backslash, fullwidth braces, Angstrom, narrow no-break space, ...).
\uc@dclc{8421}{default}{\textbackslash }
\uc@dclc{10100}{default}{\{}
\uc@dclc{10101}{default}{\}}
\uc@dclc{8491}{default}{\AA{}}
\uc@dclc{8239}{default}{\,}
\uc@dclc{20154}{default}{ }
\uc@dclc{10148}{default}{>}
\def\textschwa{\rotatebox{-90}{e}}
% CJK text commands are no-ops without fontspec.
\def\textJapanese{}
\def\textChinese{}
\IfFileExists{tipa.sty}{\usepackage{tipa}}{}
\fi
\def\exampleFont{\ttfamily\small}
\DeclareTextSymbol{\textpi}{OML}{25}
\usepackage{relsize}
\RequirePackage{array}
% Patched copy of the array package's \@testpach (as extended by colortbl):
% classifies the next tabular-preamble token. Column classes: c/l/r -> 0,
% | -> 1, ! -> 6, @ -> 7, plus colortbl's ( -> 8 and ) -> 9 which carry
% \columncolor specifications (used by the ){...} column types below);
% m/p/b open argument-taking columns. Token order and \fi nesting are
% load-bearing — do not reformat.
\def\@testpach{\@chclass
\ifnum \@lastchclass=6 \@ne \@chnum \@ne \else
\ifnum \@lastchclass=7 5 \else
\ifnum \@lastchclass=8 \tw@ \else
\ifnum \@lastchclass=9 \thr@@
\else \z@
\ifnum \@lastchclass = 10 \else
\edef\@nextchar{\expandafter\string\@nextchar}%
\@chnum
\if \@nextchar c\z@ \else
\if \@nextchar l\@ne \else
\if \@nextchar r\tw@ \else
\z@ \@chclass
\if\@nextchar |\@ne \else
\if \@nextchar !6 \else
\if \@nextchar @7 \else
\if \@nextchar (8 \else
\if \@nextchar )9 \else
10
\@chnum
\if \@nextchar m\thr@@\else
\if \@nextchar p4 \else
\if \@nextchar b5 \else
\z@ \@chclass \z@ \@preamerr \z@ \fi \fi \fi \fi
\fi \fi \fi \fi \fi \fi \fi \fi \fi \fi \fi \fi}
% Restore \\ as the row terminator inside ragged cells.
\gdef\arraybackslash{\let\\=\@arraycr}
% Real text-mode subscript (used by \textsubscript below).
\def\@textsubscript#1{{\m@th\ensuremath{_{\mbox{\fontsize\sf@size\z@#1}}}}}
% \Panel{content}{color}{ncols}{colspec}: colored \multicolumn panel; the
% leading ) relies on the patched \@testpach above.
\def\Panel#1#2#3#4{\multicolumn{#3}{){\columncolor{#2}}#4}{#1}}
% TEI element wrappers rendered with no decoration in this layout.
\def\abbr{}
\def\corr{}
\def\expan{}
\def\gap{}
\def\orig{}
\def\reg{}
% NOTE(review): this clobbers LaTeX's \ref; hyperref (loaded later) installs
% its own working \ref, so cross-references still resolve — confirm before
% reusing this preamble without hyperref.
\def\ref{}
\def\sic{}
\def\persName{}\def\name{}
\def\placeName{}
\def\orgName{}
% Decorative faces for TEI rend="calligraphic"/"gothic".
% BUG FIX: \fontspec exists only in the XeTeX branch of this preamble; the
% original unconditional definitions crash under pdfLaTeX. Guard on the
% engine and fall back to italics when fontspec is unavailable.
\ifxetex
\def\textcal#1{{\fontspec{Lucida Calligraphy}#1}}
\def\textgothic#1{{\fontspec{Lucida Blackletter}#1}}
\else
\def\textcal#1{{\itshape #1}}% pdfLaTeX approximation
\def\textgothic#1{{\itshape #1}}% pdfLaTeX approximation
\fi
% Simple inline text styles emitted by the TEI conversion.
\def\textlarge#1{{\large #1}}
\def\textoverbar#1{\ensuremath{\overline{#1}}}
\def\textquoted#1{‘#1’}
\def\textsmall#1{{\small #1}}
\def\textsubscript#1{\@textsubscript{\selectfont#1}}
\def\textxi{\ensuremath{\xi}}
% Font switch for titles of cited works.
\def\titlem{\itshape}
% TEI block-level environments. Most are visual no-ops that only force a
% paragraph break on exit (\ifvmode guards against a double \par when the
% paragraph already ended).
\newenvironment{biblfree}{}{\ifvmode\par\fi }
\newenvironment{bibl}{}{}
% Title-block byline: 16pt italic.
\newenvironment{byline}{\vskip6pt\itshape\fontsize{16pt}{18pt}\selectfont}{\par }
\newenvironment{citbibl}{}{\ifvmode\par\fi }
% Document author on the title block: 16pt italic.
\newenvironment{docAuthor}{\ifvmode\vskip4pt\fontsize{16pt}{18pt}\selectfont\fi\itshape}{\ifvmode\par\fi }
\newenvironment{docDate}{}{\ifvmode\par\fi }
\newenvironment{docImprint}{\vskip 6pt}{\ifvmode\par\fi }
% Document title: 22pt bold.
\newenvironment{docTitle}{\vskip6pt\bfseries\fontsize{22pt}{25pt}\selectfont}{\par }
\newenvironment{msHead}{\vskip 6pt}{\par}
\newenvironment{msItem}{\vskip 6pt}{\par}
\newenvironment{rubric}{}{}
\newenvironment{titlePart}{}{\par }
% Paragraph-mode column types; the leading ) is the colortbl-style prefix
% class patched into \@testpach above. L/C/R are ragged/centred p-columns,
% P/B/M plain p-, b- and m-columns, all with \\ restored inside cells.
\newcolumntype{L}[1]{){\raggedright\arraybackslash}p{#1}}
\newcolumntype{C}[1]{){\centering\arraybackslash}p{#1}}
\newcolumntype{R}[1]{){\raggedleft\arraybackslash}p{#1}}
\newcolumntype{P}[1]{){\arraybackslash}p{#1}}
\newcolumntype{B}[1]{){\arraybackslash}b{#1}}
\newcolumntype{M}[1]{){\arraybackslash}m{#1}}
% Light grey for struck-out unused attribute values.
\definecolor{label}{gray}{0.75}
\def\unusedattribute#1{\sout{\textcolor{label}{#1}}}
% \xref{url}{text}: hyperlink via hyperref's URL tokenizer.
\DeclareRobustCommand*{\xref}{\hyper@normalise\xref@}
\def\xref@#1#2{\hyper@linkurl{#2}{#1}}
% Make _ harmless in running text: the active _ typesets an upright
% subscript; mathcode "8000 routes math-mode _ through the same definition.
\begingroup
\catcode`\_=\active
\gdef_#1{\ensuremath{\sb{\mathrm{#1}}}}
\endgroup
\mathcode`\_=\string"8000
\catcode`\_=12\relax
\usepackage[a4paper,twoside,lmargin=1in,rmargin=1in,tmargin=1in,bmargin=1in,marginparwidth=0.75in]{geometry}
\usepackage{framed}
\definecolor{shadecolor}{gray}{0.95}
\usepackage{longtable}
\usepackage[normalem]{ulem}
\usepackage{fancyvrb}
\usepackage{fancyhdr}
\usepackage{graphicx}
\usepackage{marginnote}
% Citations print bare (no brackets); margin notes in small italics.
\renewcommand{\@cite}[1]{#1}
\renewcommand*{\marginfont}{\itshape\footnotesize}
% Extension search order for \includegraphics without an extension.
\def\Gin@extensions{.pdf,.png,.jpg,.mps,.tif}
\pagestyle{fancy}
\usepackage[pdftitle={Decomposition of the Random Error Vector of a General Linear Model},
pdfauthor={}]{hyperref}
\hyperbaseurl{}
\paperwidth210mm
\paperheight297mm
% TOC metrics and paragraph-breaking tuning.
\def\@pnumwidth{1.55em}
\def\@tocrmarg {2.55em}
\def\@dotsep{4.5}
\setcounter{tocdepth}{3}
\clubpenalty=8000
\emergencystretch 3em
\hbadness=4000
\hyphenpenalty=400
\pretolerance=750
\tolerance=2000
\vbadness=4000
\widowpenalty=10000
% Section heads: Large bold with the usual \@startsection before/after glue.
\renewcommand\section{\@startsection {section}{1}{\z@}%
{-1.75ex \@plus -0.5ex \@minus -.2ex}%
{0.5ex \@plus .2ex}%
{\reset@font\Large\bfseries}}
% Subsection heads: same vertical pattern as \section, without \bfseries.
% Consistency fix: the before-skip glue was written "\@minus- .2ex"
% (misplaced space); normalized to "\@minus -.2ex" to match every sibling
% definition. TeX parses both identically, so output is unchanged.
\renewcommand\subsection{\@startsection{subsection}{2}{\z@}%
{-1.75ex\@plus -0.5ex \@minus -.2ex}%
{0.5ex \@plus .2ex}%
{\reset@font\Large}}
% Lower-level heads follow the same pattern with decreasing sizes;
% \subparagraph is a run-in head (negative after-skip).
\renewcommand\subsubsection{\@startsection{subsubsection}{3}{\z@}%
{-1.5ex\@plus -0.35ex \@minus -.2ex}%
{0.5ex \@plus .2ex}%
{\reset@font\large}}
\renewcommand\paragraph{\@startsection{paragraph}{4}{\z@}%
{-1ex \@plus-0.35ex \@minus -0.2ex}%
{0.5ex \@plus .2ex}%
{\reset@font\normalsize}}
\renewcommand\subparagraph{\@startsection{subparagraph}{5}{\parindent}%
{1.5ex \@plus1ex \@minus .2ex}%
{-1em}%
{\reset@font\normalsize\bfseries}}
% TOC entry for sections: bold, no dotted leaders; lower levels use the
% standard dotted line with increasing indents.
\def\l@section#1#2{\addpenalty{\@secpenalty} \addvspace{1.0em plus 1pt}
\@tempdima 1.5em \begingroup
\parindent \z@ \rightskip \@pnumwidth
\parfillskip -\@pnumwidth
\bfseries \leavevmode #1\hfil \hbox to\@pnumwidth{\hss #2}\par
\endgroup}
\def\l@subsection{\@dottedtocline{2}{1.5em}{2.3em}}
\def\l@subsubsection{\@dottedtocline{3}{3.8em}{3.2em}}
\def\l@paragraph{\@dottedtocline{4}{7.0em}{4.1em}}
\def\l@subparagraph{\@dottedtocline{5}{10em}{5em}}
% Ensure the counters exist even though article has no \chapter.
\@ifundefined{c@section}{\newcounter{section}}{}
\@ifundefined{c@chapter}{\newcounter{chapter}}{}
\newif\if@mainmatter
\@mainmattertrue
\def\chaptername{Chapter}
% Book-style matter switches for this article class:
% front matter numbers pages and heads in roman ...
\def\frontmatter{%
\pagenumbering{roman}
\def\thechapter{\@roman\c@chapter}
\def\theHchapter{\roman{chapter}}
\def\thesection{\@roman\c@section}
\def\theHsection{\roman{section}}
\def\@chapapp{}%
}
% ... main matter resets counters and switches to arabic ...
\def\mainmatter{%
\cleardoublepage
\def\thechapter{\@arabic\c@chapter}
\setcounter{chapter}{0}
\setcounter{section}{0}
\pagenumbering{arabic}
\setcounter{secnumdepth}{6}
\def\@chapapp{\chaptername}%
\def\theHchapter{\arabic{chapter}}
\def\thesection{\@arabic\c@section}
\def\theHsection{\arabic{section}}
}
% ... back matter switches to appendix-style lettering.
\def\backmatter{%
\cleardoublepage
\setcounter{chapter}{0}
\setcounter{section}{0}
\setcounter{secnumdepth}{2}
\def\@chapapp{\appendixname}%
\def\thechapter{\@Alph\c@chapter}
\def\theHchapter{\Alph{chapter}}
\appendix
}
% Numbered bibliography list modelled on thebibliography; the argument is
% the widest label (e.g. {1}) and sizes the hanging indent.
\newenvironment{bibitemlist}[1]{%
\list{\@biblabel{\@arabic\c@enumiv}}%
{\settowidth\labelwidth{\@biblabel{#1}}%
\leftmargin\labelwidth
\advance\leftmargin\labelsep
\@openbib@code
\usecounter{enumiv}%
\let\p@enumiv\@empty
\renewcommand\theenumiv{\@arabic\c@enumiv}%
}%
\sloppy
\clubpenalty4000
\@clubpenalty \clubpenalty
\widowpenalty4000%
\sfcode`\.\@m}%
{\def\@noitemerr
{\@latex@warning{Empty `bibitemlist' environment}}%
\endlist}
% Unnumbered TOC heading plus body-paragraph settings.
\def\tableofcontents{\section*{\contentsname}\@starttoc{toc}}
\parskip0pt
\parindent1em
% FIX: a second, byte-identical \def\Panel stood here; it duplicated the
% definition given earlier in the preamble and has been removed.
% reflist: ragged-right reference list, italic run-in labels, tight spacing.
\newenvironment{reflist}{%
\begin{raggedright}\begin{list}{}
{%
\def\makelabel##1{\itshape ##1}%
\setlength{\parsep}{2pt}%
\setlength{\parskip}{0pt}%
\setlength{\itemindent}{0pt}%
\setlength{\itemsep}{0pt}%
\setlength{\rightmargin}{0.25in}%
\setlength{\topsep}{0pt}}%
}
{\end{list}\end{raggedright}}
% sansreflist: identical layout to reflist but with upright labels.
\newenvironment{sansreflist}{%
\begin{raggedright}\begin{list}{}
{%
\def\makelabel##1{\upshape ##1}%
\setlength{\parsep}{2pt}%
\setlength{\parskip}{0pt}%
\setlength{\itemindent}{0pt}%
\setlength{\itemsep}{0pt}%
\setlength{\rightmargin}{0.25in}%
\setlength{\topsep}{0pt}}%
}
{\end{list}\end{raggedright}}
% specHead{label}{title}: rule-separated specification heading with its own
% PDF bookmark; the 16pt title hangs into the left margin.
\newenvironment{specHead}[2]%
{\vspace{20pt}\hrule\vspace{10pt}%
\phantomsection\label{#1}\markright{#2}%
\pdfbookmark[2]{#2}{#1}%
\hspace{-0.75in}{\bfseries\fontsize{16pt}{18pt}\selectfont#2}%
}{}
% Document metadata placeholders filled in by the TEI conversion.
\def\TheFullDate{1970-01-01 (revised: 01 January 1970)}
\def\TheID{\makeatother }
\def\TheDate{1970-01-01}
\title{Decomposition of the Random Error Vector of a General Linear Model}
\author{}\makeatletter
\makeatletter
% Like \cleardoublepage but flushes to a *left-hand* (even) page: after
% \clearpage, pad with a blank page while the upcoming page number is odd.
\newcommand*{\cleartoleftpage}{%
\clearpage
\if@twoside
\ifodd\c@page
\hbox{}\newpage
\if@twocolumn
\hbox{}\newpage
\fi
\fi
\fi
}
\makeatother
\makeatletter
\thispagestyle{empty}
% Running heads built from title/author.
\markright{\@title}\markboth{\@title}{\@author}
% 9pt/11pt \small with matching display skips and list spacing.
\renewcommand\small{\@setfontsize\small{9pt}{11pt}\abovedisplayskip 8.5\p@ plus3\p@ minus4\p@
\belowdisplayskip \abovedisplayskip
\abovedisplayshortskip \z@ plus2\p@
\belowdisplayshortskip 4\p@ plus2\p@ minus2\p@
\def\@listi{\leftmargin\leftmargini
\topsep 2\p@ plus1\p@ minus1\p@
\parsep 2\p@ plus\p@ minus\p@
\itemsep 1pt}
}
\makeatother
% Verbatim framing, running header/footer layout and link colours.
\fvset{frame=single,numberblanklines=false,xleftmargin=5mm,xrightmargin=5mm}
\fancyhf{}
\setlength{\headheight}{14pt}
\fancyhead[LE]{\bfseries\leftmark}
\fancyhead[RO]{\bfseries\rightmark}
\fancyfoot[RO]{}
\fancyfoot[CO]{\thepage}
\fancyfoot[LO]{\TheID}
\fancyfoot[LE]{}
\fancyfoot[CE]{\thepage}
\fancyfoot[RE]{\TheID}
\hypersetup{citebordercolor=0.75 0.75 0.75,linkbordercolor=0.75 0.75 0.75,urlbordercolor=0.75 0.75 0.75,bookmarksnumbered=true}
\fancypagestyle{plain}{\fancyhead{}\renewcommand{\headrulewidth}{0pt}}
\date{}
\usepackage{authblk}
% \keywords{...}: "Index terms" line printed after the abstract.
\providecommand{\keywords}[1]
{
\footnotesize
\textbf{\textit{Index terms---}} #1
}
\usepackage{graphicx,xcolor}
% Journal house colours.
\definecolor{GJBlue}{HTML}{273B81}
\definecolor{GJLightBlue}{HTML}{0A9DD9}
\definecolor{GJMediumGrey}{HTML}{6D6E70}
\definecolor{GJLightGrey}{HTML}{929497}
% Abstract set ragged-right between two full-width grey rules.
\renewenvironment{abstract}{%
\setlength{\parindent}{0pt}\raggedright
\textcolor{GJMediumGrey}{\rule{\textwidth}{2pt}}
\vskip16pt
\textcolor{GJBlue}{\large\bfseries\abstractname\space}
}{%
\vskip8pt
\textcolor{GJMediumGrey}{\rule{\textwidth}{2pt}}
\vskip16pt
}
% Absolute-position blocks for the masthead; number every output line.
\usepackage[absolute,overlay]{textpos}
\makeatother
\usepackage{lineno}
\linenumbers
\begin{document}
\author[1]{Jaesung Choi}
\affil[1]{Keimyung University}
\renewcommand\Authands{ and }
\date{\small \em Received: 1 January 1970 Accepted: 1 January 1970 Published: 1 January 1970}
\maketitle
\begin{abstract}
This paper deals with the decomposition of an error vector to identify how the error vector is related to the expected value of an observation vector under a general linear sample model, since the error vector is defined as the deviation of the observation vector from its expected value. The main idea of the paper is that a random error vector can be decomposed into two orthogonal component vectors; i.e., one lies in the vector space generated by the coefficient matrix of the unknown parameter vector and the other lies in its orthogonal complement. As topics related to the decomposition, two things are discussed: partitioning an observation vector and constructing its covariance structure. It is also shown why a projection method would be preferred to a least squares method.
\end{abstract}
\keywords{coefficient matrix; decomposition; least squares; orthogonal complement; projection; vector space.}
\begin{textblock*}{18cm}(1cm,1cm) % {block width} (coords)
\textcolor{GJBlue}{\LARGE Global Journals \LaTeX\ JournalKaleidoscope\texttrademark}
\end{textblock*}
\begin{textblock*}{18cm}(1.4cm,1.5cm) % {block width} (coords)
\textcolor{GJBlue}{\footnotesize \\ Artificial Intelligence formulated this projection for compatibility purposes from the original article published at Global Journals. However, this technology is currently in beta. \emph{Therefore, kindly ignore odd layouts, missed formulae, text, tables, or figures.}}
\end{textblock*}
\begin{textblock*}{10cm}(1.05cm,3cm)
{{\textit{CrossRef DOI of original article:}} \underline{}}
\end{textblock*}\let\tabcellsep&
\section[{Issue ersion I V II ( F )}]{Issue ersion I V II ( F )}\par
When there are some nonrandom quantities affecting a response variable, the analysis of data can be done by using a general linear model. There are some notable references about linear models such as \hyperref[b0]{[1]}\hyperref[b2]{[2]}\hyperref[b4]{[3]}\hyperref[b6]{[4]}. In a general linear model, the response variable is composed of two parts in general; one part is the deterministic portion, a linear function of the unknown parameters of the independent or predictor variables, and the other is the random portion. When data are collected, the sample general linear model in matrix form is applicable to the data. However, the matrix equation seems not to be useful for catching any idea about the relationship between the matrix of predictors and the error vector only with the assumptions about the error vector; i.e., $E(\epsilon) = 0$ and $\mathrm{var}(\epsilon) = \sigma^2 I$. This requires that the error vector be the line segment from the origin, which is the point $0$, to the point $\epsilon$. The general linear sample model in matrix form is $y = X\beta + \epsilon$ \hyperref[b0]{(1)} where $y$ denotes the $n \times 1$ vector of observations, $X$ denotes the $n \times p$ matrix of known values, $\beta$ denotes the $p \times 1$ vector of unknown parameters and $\epsilon$ denotes the $n \times 1$ vector of unobserved random errors. The detailed discussion on random errors can be seen in Searle \hyperref[b0]{[1]}. The matrix equation \hyperref[b0]{(1)} shows that the vector of observations is composed of a mean vector and an error vector. When $E(y)$ is $0$, a mean vector acting like an origin, the error vector satisfies the conditions of mean $0$ and $\mathrm{var}(\epsilon) = \sigma^2 I$. However, if $E(y) = X\beta$ then we might be interested in how to minimize the deviation vector, $y - X\beta$. The least squares method can be used as one of the available methods for minimizing the error vector to estimate the parameter vector $\beta$.
However, the decomposition of an random error vector can be a little bit more comfortable and effective than the minimization of error sum of Abstract-This paper deals with the decomposition of an error vector to identify how the error vector is related to the expected value of an observation vector under a general linear sample model since the error vector is defined as the deviance of observation vector from the expected value. The main idea of the paper is in that a random error vector can be decomposed into two orthogonal components vectors; i.e., one is in a vector space generated by the coefficient matrix of the unknown parameter vector and the other is in orthogonal complement of it. As related topics to the decomposition, two things are discussed: partitioning an observation vector and constructing the covariance structure of it. It also shows the reason why a projection method would be preferred rather than a least squares method.
\section[{Issue ersion I}]{Issue ersion I}\par
V II ( F )\par
Decomposition of the Random Error Vector of a General Linear Model ? = y ? X ? is nonzero, ? is not in the column space of X. The related topics about these can be seen in Graybill \hyperref[b8]{[5]}, Johnson \hyperref[b9]{[6]} and so forth. Thus, when X ? is used as a base for getting a error vector of y that is required to have one with the shortest distance from the origin among all the error vectors, we need to break up the error vector into a few component error vectors. This is the main idea of this paper. First, we discuss how to decompose the error vector. Secondly, we study the structure of error vector adjusted for the mean vector. Thirdly, we find the structure of var(y) that is related to the error structure. Finally, we discuss a method that is useful to calculate the sum of squares associated with the error components.\par
In the matrix equation (1) let the mean vector $X\beta$ be a nonzero vector. Then the equation can be changed into $y = X\beta + \epsilon$ (2) $= X\beta + \epsilon_m + \epsilon_r$\par
where $\epsilon_m$ and $\epsilon_r$ denote two component vectors of the error vector. Since $E(y)$ is $X\beta$, $\epsilon$ can be broken down into two types of error vector; one type is the one that lies in the column space of $X$, and the other is orthogonal to every vector in the vector space generated by the columns of $X$, i.e., it lies in the orthogonal complement of the column space of $X$. In matrix equation (2), $y - X\beta$ defines the error vector based on the mean vector $X\beta$, assumed to have mean $0$ and variance $\sigma^2 I$ in $n$-dimensional space. Since the error vector is defined as a vector of deviations from the mean vector $E(y)$, we can decompose it into two component vectors depending on the source from which the error vector arises; i.e., $\epsilon_m$ or $\epsilon_r$. Let us rewrite the matrix equation (2) in terms of the error vector from the mean vector. Then, $y - X\beta = \epsilon_m + \epsilon_r$ (3) $= XX^{+}\epsilon + (I - XX^{+})\epsilon$\par
where $\epsilon_m = XX^{+}\epsilon$, $\epsilon_r = (I - XX^{+})\epsilon$, and $X^{+} = (X'X)^{-1}X'$ denotes the Moore--Penrose generalized inverse, where $(X'X)^{-1}$ exists because $X$ is a full column rank matrix. From the equation (3), we can see that there are two types of error vector when a vector space is decomposed into two orthogonal vector subspaces based on the mean vector, $X\beta$, of $y$.\par
The above equation shows that E(y ? X ?) = 0 and var(y ? X ?) = ? 2 I. Here, the error vector y ? X ? is assumed as a linear combination of two different types of error vector each of which coming from two orthogonal vector subspaces. This is completely different concept of an error vector from the one we have thought traditionally. A lot of stuffs can be developed by the newly idea of viewing the error vector as the sum of a mean component error vector and a residual component error vector. Here, two specific terms are used to differentiate the types of error component: a mean component error for ? m and a residual component error for ? r .\par
Since the structure of a random error vector in matrix model ( {\ref 1}) is changed depending on the structure of the mean vector, we are going to take a look at it with a bit simpler general linear models. Consider a situation where measurements are measured as deviations from the fixed size for products from a routine process. Let y be a random variable taking an observation on a randomly selected product from a population of products. Data collected from a sample of size n from the population can be arrayed in matrix form. The ith observation of y is expressed as y i = 0 + ? i for i = 1, 2, . . . , n, where ? i 's are assumed to be squares by the method of least squares in perspective that the structure of an random error vector is primarily considered. This idea is applied to breaking up an error vector in model \hyperref[b0]{(1)}. Since X ? is in a specific vector subspace generated by the column space of X, and Decomposition of the Random Error Vector of a General Linear Model independent with E(? i ) = 0 and var(? i ) = ? 2 . Applying the sample general linear model \hyperref[b0]{(1)} to the data, the model turns out to bey = 0 + ? (4)\par
where y is in R n denoting a Euclidean n-space, and E(y) = 0. Let V 0 be the vector space consisting only of 0 and let V 1 be the orthogonal complement of V 0 . Since y is the sum of two vectors such that one in V 0 and the other in V 1 , 0 ? ? = 0 which shows the relationship between mean vector and error vector of y; i.e., an orthogonal property. To express the equation ( {\ref 4}) as the second expression in (2), ? can be divided into two terms, ? 0 and ? 1 where ? 0 denotes the error vector generated by 0 in a basis of V 0 , and ? 1 denotes the error vector generated by a basis set of V 1 . Now, the matrix equation can be expressed asy = 0 + ? 0 + ? 1 (\textbf{5})\par
where? 0 is in V 0 of dimension 0, while ? 1 is in V 1 of dimension n.\par
Adding the information about the dimension of error component vectors, the equation ( \hyperref[formula_3]{5}) can be transformed intoy ? 0 = O? + (I ? O)?\textbf{(6)}\par
where O represents n × n zero matrix and I is n × n identity matrix. The equation shows that ? from the origin of rank 0 can be decomposed into two orthogonal vectors one of which being in the orthogonal complement of a vector space. In other words, this means that ? is actually composed of a linear combination of two component vectors; i.e. ? 0 and ? 1 . It seems such valuable to grasp the structure of a random error vector for finding the covariance structure of an observation vector. Now, we can study the error structure further with a little bit general but still simpler model having just one quantitative variable as an predictor. Consider the simple linear model with only one nonrandom independent variable in addition to an intercept term; i.e.,y i = ? 0 + ? 1 X 1i + ? i , for i = 1, 2, ? ? ? , n.\par
We rewrite this in vector and matrix form asy = j? 0 + X 1 ? 1 + ? (7) = X ? + ?\par
where X = (j, X 1 ) is an n × 2 coefficient matrix of ?, ? = (? 0 , ? 1 ) ? is an n × 2 parameter vector, j is an n × 1 vector of ones, and X 1 is an n × 1 vector of quantitative values. ? is an error vector assumed to have E(?) = 0 and var(?) = ? 2 I. The equation ( {\ref 7}) is different from the one in \hyperref[b9]{(6)} in that E(y) ? = 0. This is not a surprising thing in a general linear model other than that the mean vector X ? belongs to the column space of X, V m of dimension 2 which is thought to be a vector subspace in a Euclidean n-space, R n . Since the mean vector of y, X ?, is in V m , ? should be divided into two components: one in V m and the other inV m ? denoting an orthogonal complement of V m in R n ; V m ? V m ? = R n .\par
The set of two vectors in the matrix of X can be regarded as a basis set of V m , which implies that X ? is in V m . Hence, the error vector can be divided into two component vectors such that one component in V m and the other in V m ? ; that is, ? = ? m + ? r . When we add this kind of information to the equation, the model will bey ? X ? = ? m + (I ? ? m )?\textbf{(8)}\par
wherey ? X ? ? R n , ? ? V m and (I ? ? m )? in V m ? .\par
Both the set of the columns of a matrix X of rank 2 equivalent to a basis for V m and the set of the columns of a matrix XX ? can generate the same space, V m . Hence, the equation ( \hyperref[formula_10]{8}) can be changed into where XX ? ? replaces ? m and denotes the projection of ? m onto a vector space, V m , generated by two vectors j and x. The matrix equation ( \hyperref[formula_12]{9}) turns out to bey ? X ? ? X X ? ? = (I ? X X ? )?\textbf{(9}(I ? XX ? )? = y ? (X ? + X X ? ?) (10) = (I ? X X ? )y where X ? + XX ? ? = XX ? y.\par
From the decomposition of a random error vector of a general linear sample model in matrix form, we can identify that the matrix model ( {\ref 1}) can be transformed intoy = X ? + ? (11) = X X ? y + (I ? X X ? )y\par
where y is composed of two orthogonal vectors: i.e., (XX ? y) ? (I ? X X ? )y = 0. The model equation \hyperref[b14]{(11)} implies that all types of a general linear model can be represented by a sum of two orthogonal vectors where one vector belongs to an vector subspace and the other is in the orthogonal complement of the vector space generated by the coefficient matrix of ?: i.e., XX ? y ? V m , and(I ? XX ? )y ? V ? m .\par
Here, the primary concern is actually in structural aspects of an assumed linear model while the least square method focuses only on getting the best approximate solution from a system of inconsistent equations such that X ? ? y = ? by the method of minimizing the error sum of squares. Hence, they are different approaches developed from different view of points. Now, consider the calculation of var(y). The covariance matrix of y isvar(y) = var(X X ? y + (I ? X X ? )y) (12) = ? 2 X X ? + ? 2 (I ? X X ? ) = ? 2 I\par
From the above equation \hyperref[b15]{(12)}, var(y) can be obtained by identifying the linear transformations of y; i.e., the covariance matrix of y can be partitioned as the sum of component covariance matrices, which can be done by ascertaining transformation matrices for component vectors of y. There are some referable literature related to covariance matrix such as Milliken and Johnson \hyperref[b10]{[7]}, Hill \hyperref[b11]{[8]}, and Searle \hyperref[b12]{[9]}. Hence, it is essential to figure out the coefficient matrices of component error vectors to find the projections of y onto the vector subspaces generated by the orthogonal coefficient matrices. Discussions on coefficient matrices are seen in Choi \hyperref[b13]{[10]}\hyperref[b14]{[11]}\hyperref[b15]{[12]}, where they are related to get nonnegative variance estimates.\par
As a result of the decomposition of e, we see y can be represented by the sum of two orthogonal component vectors such as \hyperref[b14]{(11)} where one is in a vector space covering the E(y) and the other is in the orthogonal complement of it. This means that XX ? y actually defines a projection of y onto the vector space spanned by the XX ? where X is coefficient matrix of ? and given as X = (j, X 1 ). For the estimation of parameter vector ? we can use the mean part of the model in matrix form of \hyperref[b14]{(11)}. From the concept of a projection in a vector space the projection of y onto a column space of X is as follows:X ? = X X ? y\textbf{(13)}\par
where E(y) = X ?. When XX ? y is viewed as the orthogonal projection of y onto a column space of X we can take ?p = X ? y as the value of ? where ?p is an notation for differentiating from ? obtained from the normal equations. When the expression in ( \hyperref[formula_17]{13}) is viewed as the system of equations, the best approximate solution to the system can be Decomposition of the Random Error Vector of a General Linear Model obtained as ? = X ? y because the system of equations is inconsistent and X is n × 2 matrix of rank 2. Although solutions of ? can be obtained in different approaches, the results are actually same. In a similar way that XX ? y can be used for the estimation of ?, a quadratic form in y can also be used for the estimation of ? 2 . Here, the required quadratic form is given asQ r = y ? (I ? X X ? )y\textbf{(14)}\par
where (I ? XX ? ) is a symmetric and idempotent matrix of rank n ? 2. Since (I ? XX ? )y is regarded as a linear transformation of y, it has all the information about the residual random error component, ? r . Hence, the quadratic form Q r in y can be used to estimate the variance ? 2 . Taking the expectation of Q r is given asE(Q r ) = E(y ? (I ? X X ? )y)\textbf{(15)}= ? 2 tr(I ? X X ? ) + (X ?) ? (I ? X X ? )X ? = ? 2 (n ? 2)\par
where tr(?) means trace of a square matrix denoted by (?), which is defined to be the sum of the diagonal elements of the square matrix. Some theorems and properties of trace can be seen in Graybill \hyperref[b2]{[2]}. As an estimate of ? 2 from the equation ( \hyperref[formula_19]{15}), ?2 p can be taken as Q r /(n ? 2) which can also be obtained by the least square method when there is no normality assumption for ?. Even though those two procedures have the same result, it should be noticed they are basically approaching from different view of point; that is, one is from the decomposition of an error vector, and the other is from the minimization of error sum of squares.\par
As for an example of a simple linear model, we consider following data from Krumbein and Graybill \hyperref[b16]{[13]}. The data are assumed to satisfy the model y i = ? 0 + ?X 1i + ? i , for i = 1, 2, ? ? ? , 10, where ? i are independent and identically distributed N(0, ? 2 ).\par
Krumbein and Graybill's Data \hyperref[b16]{[13]}. For the estimation of two unknown parameters, ? 0 and ?, we can get ?p by multiplying (X ? X) ?1 X ? on both sides of the equation \hyperref[b16]{(13)}, which is given as: The primary concern of the study is on the decomposition of an error vector in matrix form of a general linear model. When ? is n × 1 vector, the usual assumptions for error vector are sometimes given as E(?) = 0 and Var(?) = ? 2 I. Under these assumptions, an idea for breaking up the error vector lies on the thought of which the mean vector is related to the error vector because the error vector is defined to be the deviation vector from the mean of the model. When the error vector is decomposed into two orthogonal components, it is shown that a projection can be defined from the decomposition of the error vector. Hence, a partition of the vector of observations can be seen as the sum of vectors which are orthogonal projections each other. The covariance matrix of y is partitioned into two covariance matrices; that is, one for (XX ? )y, and the other for (I ? XX ? )y. This implies that the covariance matrix of a vector of observations can always be partitioned into component matrices each of which corresponding to an orthogonal projection of y respectively. From the decomposition of an error vector of a general linear model, we derived two types of estimators; one is linear transformation of y for X ? to estimate ? and the other is quadratic form in y for ? 2 . Partitioning of the covariance matrix can be useful to ascertain the covariance matrix of each component projection. It is worth to note that decomposition of an error vector is actually defines a projection of y onto a column space of X and which is quite different approach from the least square method in a point of view for an error vector.x(X ? X) ?1 X ? X ? = (X ? X) ?1 X ? XX ? y (16) ?p = (X ? X) ?1 X ? y = 0.
\section[{VI. Example}]{VI. Example}\par
Although the least squares method is very useful and accepted as one of well-known methods for estimating the unknown parameters included in a linear model, it seems not to be right for finding out whether there is any orthogonal property exists among errors. Since the least squares method concentrates only on minimizing the sum of squares of deviations of the observations from the expected values, it is not an appropriate method as a tool for getting the information on an orthogonal property between the groups of errors. The orthogonal property is extremely important in statistics especially in the analysis of variance for getting nonnegative estimates for variance components. There are lots of interesting papers \hyperref[b18]{[14]}\hyperref[b19]{[15]}\hyperref[b20]{[16]}\hyperref[b21]{[17]}\hyperref[b22]{[18]}\hyperref[b23]{[19]}\hyperref[b24]{[20]} related to the negative estimates of variance components seemed to be caused by overlooking the orthogonality. So, it is emphasized that the orthogonal property can be found by the decomposition of the random error vector. Hence, the procedure discussed on this paper is distinct from any other methods for estimating the unknown parameters in a general linear model. Not applicable.\begin{figure}[htbp]
\noindent\textbf{}\includegraphics[]{image-2.png}
\caption{\label{fig_0}}\end{figure}
\begin{figure}[htbp]
\noindent\textbf{}\includegraphics[]{image-3.png}
\caption{\label{fig_1}}\end{figure}
\begin{figure}[htbp]
\noindent\textbf{} \par
\begin{longtable}{P{0.7586319218241042\textwidth}P{0.030456026058631923\textwidth}P{0.002768729641693811\textwidth}P{0.05814332247557003\textwidth}}
982060878 ?2.312086e ? 03 ?0.002312086 6.060514e ? 06\tabcellsep 1250 550500\tabcellsep =\tabcellsep ?45.2273450 0.4462054\\
\multicolumn{4}{l}{Denoting ? 0p , and ?p as estimates of ? 0 and ? respectively, ? 0p = ?45.2273450 and}\\
\multicolumn{4}{l}{?p = 0.4462054. Least squares estimates are given as ?0 = ?45.227 and ? = 0.446. For the}\\
estimation of ? 2 , we can get an estimate as:\tabcellsep \tabcellsep \tabcellsep \\
?2\tabcellsep \tabcellsep \tabcellsep \end{longtable} \par
{\small\itshape [Note: p = y ? (I ? X X ? )y/8 = 2398.13/8 = 299.7663\hyperref[b21]{(17)} where (I ?]}
\caption{\label{tab_1}}\end{figure}
\begin{figure}[htbp]
\noindent\textbf{1} \par
\begin{longtable}{P{0.85\textwidth}}
ersion I V\\
II\\
Issue\\
Volume XXIII\\
( F )\\
Frontier Research\\
of Science\\
Global Journal\end{longtable} \par
\caption{\label{tab_2}Table 1 :}\end{figure}
\footnote{© 2023 Global Journals} \backmatter \par
This research received no external funding. Not applicable.
\subsection[{Not applicable.}]{Not applicable.}\par
The data analyzed in this study are openly available in reference number \hyperref[b16]{[13]}.\par
Not applicable.\par
The authors declare no conflict of interest.
\subsection[{VII. Discussion}]{VII. Discussion}
\subsection[{VIII. Conclusions}]{VIII. Conclusions}\par
Author Contributions:\par
Funding:\par
Institutional Review Board Statement:\par
Informed Consent Statement:\par
Data Availability Statement: Table~21.1, p.~231, Krumbein and Graybill \hyperref[b16]{[13]}.\par
Acknowledgments:\par
Conflicts of Interest: \begin{bibitemlist}{1}
\bibitem[Mcgraw-Hill ()]{b17}\label{b17} \textit{}, Mcgraw-Hill . 1965. New York, USA.
\bibitem[Wiley and Sons ()]{b1}\label{b1} \textit{}, John Wiley , Sons . 1971. New York, USA.
\bibitem[Wiley and Sons ()]{b7}\label{b7} \textit{}, John Wiley , Sons . 1981. New York, USA.
\bibitem[Wiley and Sons ()]{b5}\label{b5} \textit{}, John Wiley , Sons . 1995. New York, USA.
\bibitem[Satterthwaite ()]{b24}\label{b24} ‘An approximate distribution of estimates of variance components’. F E Satterthwaite . \textit{Biometrics Bulletin} 1946. 2 p. .
\bibitem[Krumbein and Graybill]{b16}\label{b16} \textit{An introduction to statistical models in geology}, W C Krumbein , F A Graybill .
\bibitem[Milliken and Johnson ()]{b10}\label{b10} \textit{Analysis of messy data volume 1: designed experiments}, G A Milliken , D E Johnson . 1984. New York, USA: Van Nostrand Reinhold.
\bibitem[Johnson and Wichern ()]{b9}\label{b9} \textit{Applied multivariate statistical analysis}, R A Johnson , D W Wichern . 2014. Upper Saddle River, NJ, USA: Prentice hall.
\bibitem[Draper and Smith]{b6}\label{b6} \textit{Applied Regression Analysis}, N R Draper , H Smith .
\bibitem[Henderson ()]{b22}\label{b22} ‘Estimation of variance and covariance components’. C R Henderson . \textit{Biometrics} 1953. 9 p. .
\bibitem[Hartley ()]{b23}\label{b23} ‘Expectations, variances and covariances of ANOVA mean squares by “synthesis”’. H O Hartley . \textit{Biometrics} 1967. 23 p. .
\bibitem[Hill ()]{b11}\label{b11} ‘Inference about variance components in the one-way model’. B M Hill . \textit{J. Am. Stat. Assoc} 1965. 60 p. .
\bibitem[Searle]{b0}\label{b0} \textit{Linear models}, S R Searle .
\bibitem[Stapleton]{b4}\label{b4} \textit{Linear Statistical Models}, J H Stapleton .
\bibitem[Graybill ()]{b8}\label{b8} \textit{Matrices with Applications in Statistics}, F A Graybill . 1983. Wadsworth; Belmont, CA, USA.
\bibitem[Thompson ()]{b18}\label{b18} \textit{Negative estimates of variance components: an introduction; Bulletin, International Institute of Statistics}, W A Thompson . 1961. 34 p. .
\bibitem[Thompson and Moore ()]{b20}\label{b20} ‘Non-negative estimates of variance components’. W A Thompson , J R Moore . \textit{Technometrics} 1963. 5 p. .
\bibitem[Choi ()]{b13}\label{b13} ‘Nonnegative estimates of variance components in a two-way random model’. J Choi . \textit{Communications for Statistical Applications and 127 Methods}, 2019. 26 p. .
\bibitem[Choi]{b15}\label{b15} ‘Nonnegative estimation of variance components for a nested three-way random model’. J Choi . \textit{Symmetry} 2022. \xref{http://dx.doi.org/10.3390/sym14061210}{10.3390/sym14061210}. \url{https://doi.org/10.3390/sym14061210} 14 p. 1210.
\bibitem[Choi ()]{b14}\label{b14} ‘Nonnegative variance component estimation for mixed-effects models’. J Choi . \textit{Communications for Statistical Applications and Methods} 2020. 27 p. .
\bibitem[Wadsworth ()]{b3}\label{b3} \textit{Notes Decomposition of the Random Error Vector of a General Linear Model}, Wadsworth . 1976. Belmont, CA, USA.
\bibitem[Nelder ()]{b21}\label{b21} ‘The interpretation of negative components of variance’. J A Nelder . \textit{Biometrika} 1954. 41 p. .
\bibitem[Thompson ()]{b19}\label{b19} ‘The problem of negative estimates of variance components’. W A Thompson . \textit{Ann.Math.Stat} 1962. 33 p. .
\bibitem[Graybill]{b2}\label{b2} \textit{Theory and Application of the Linear Model}, F A Graybill .
\bibitem[Searle et al.]{b12}\label{b12} \textit{Variance components}, S R Searle , G Casella , C E Mcculloch .
\end{bibitemlist}
\end{document}