author     Reinhard Tartler <siretart@tauware.de>   2011-10-10 17:43:39 +0200
committer  Reinhard Tartler <siretart@tauware.de>   2011-10-10 17:43:39 +0200
commit     f4092abdf94af6a99aff944d6264bc1284e8bdd4
tree       2ac1c9cc16ceb93edb2c4382c088dac5aeafdf0f /nx-X11/extras/drm/linux-core
parent     a840692edc9c6d19cd7c057f68e39c7d95eb767d
Imported nx-X11-3.1.0-1.tar.gz (tag: nx-X11/3.1.0-1)
Summary: Imported nx-X11-3.1.0-1.tar.gz
Keywords:
Imported nx-X11-3.1.0-1.tar.gz
into Git repository
Diffstat (limited to 'nx-X11/extras/drm/linux-core')
62 files changed, 22408 insertions, 0 deletions
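
For readers following along locally, the diffstat above can be reproduced from a checkout of the repository. A minimal sketch, assuming a local clone of nx-libs with this commit present:

    # Show the per-file stats for this commit, limited to the same subtree:
    git show --stat f4092abdf94af6a99aff944d6264bc1284e8bdd4 -- nx-X11/extras/drm/linux-core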
diff --git a/nx-X11/extras/drm/linux-core/.cvsignore b/nx-X11/extras/drm/linux-core/.cvsignore
new file mode 100644
index 000000000..c68a92aa6
--- /dev/null
+++ b/nx-X11/extras/drm/linux-core/.cvsignore
@@ -0,0 +1,6 @@
+*.ko
+*.mod.c
+*.o.cmd
+*.ko.cmd
+.tmp_versions
+drm_pciids.h
diff --git a/nx-X11/extras/drm/linux-core/Config.in b/nx-X11/extras/drm/linux-core/Config.in
new file mode 100644
index 000000000..46ba48d6f
--- /dev/null
+++ b/nx-X11/extras/drm/linux-core/Config.in
@@ -0,0 +1,17 @@
+#
+# Drm device configuration
+#
+# This driver provides support for the
+# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
+#
+
+tristate ' 3dfx Banshee/Voodoo3+' CONFIG_DRM_TDFX
+#tristate ' 3dlabs GMX 2000' CONFIG_DRM_GAMMA
+tristate ' ATI Rage 128' CONFIG_DRM_R128
+tristate ' ATI Radeon' CONFIG_DRM_RADEON
+dep_tristate ' Intel I810' CONFIG_DRM_I810 $CONFIG_AGP
+dep_tristate ' Intel 830M/845G/852GM/855GM/865G' CONFIG_DRM_I830 $CONFIG_AGP
+dep_tristate ' Matrox g200/g400' CONFIG_DRM_MGA $CONFIG_AGP
+tristate ' SiS' CONFIG_DRM_SIS
+tristate ' Via Unichrome' CONFIG_DRM_VIA
+
diff --git a/nx-X11/extras/drm/linux-core/Doxyfile b/nx-X11/extras/drm/linux-core/Doxyfile
new file mode 100644
index 000000000..97efeaa6f
--- /dev/null
+++ b/nx-X11/extras/drm/linux-core/Doxyfile
@@ -0,0 +1,1161 @@
+# Doxyfile 1.3.8
+
+# This file describes the settings to be used by the documentation system
+# doxygen (www.doxygen.org) for a project
+#
+# All text after a hash (#) is considered a comment and will be ignored
+# The format is:
+#       TAG = value [value, ...]
+# For lists items can also be appended using:
+#       TAG += value [value, ...]
+# Values that contain spaces should be placed between quotes (" ")
+
+#---------------------------------------------------------------------------
+# Project related configuration options
+#---------------------------------------------------------------------------
+
+# The PROJECT_NAME tag is a single word (or a sequence of words surrounded
+# by quotes) that should identify the project.
+
+PROJECT_NAME = "Direct Rendering Module"
+
+# The PROJECT_NUMBER tag can be used to enter a project or revision number.
+# This could be handy for archiving the generated documentation or
+# if some version control system is used.
+
+PROJECT_NUMBER =
+
+# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute)
+# base path where the generated documentation will be put.
+# If a relative path is entered, it will be relative to the location
+# where doxygen was started. If left blank the current directory will be used.
+
+OUTPUT_DIRECTORY =
+
+# If the CREATE_SUBDIRS tag is set to YES, then doxygen will create
+# 4096 sub-directories (in 2 levels) under the output directory of each output
+# format and will distribute the generated files over these directories.
+# Enabling this option can be useful when feeding doxygen a huge amount of source
+# files, where putting all generated files in the same directory would otherwise
+# cause performance problems for the file system.
+
+CREATE_SUBDIRS = NO
+
+# The OUTPUT_LANGUAGE tag is used to specify the language in which all
+# documentation generated by doxygen is written. Doxygen will use this
+# information to generate all constant output in the proper language.
+# The default language is English, other supported languages are:
+# Brazilian, Catalan, Chinese, Chinese-Traditional, Croatian, Czech, Danish,
+# Dutch, Finnish, French, German, Greek, Hungarian, Italian, Japanese,
+# Japanese-en (Japanese with English messages), Korean, Korean-en, Norwegian,
+# Polish, Portuguese, Romanian, Russian, Serbian, Slovak, Slovene, Spanish,
+# Swedish, and Ukrainian.
+
+OUTPUT_LANGUAGE = English
+
+# This tag can be used to specify the encoding used in the generated output.
+# The encoding is not always determined by the language that is chosen,
+# but also whether or not the output is meant for Windows or non-Windows users.
+# In case there is a difference, setting the USE_WINDOWS_ENCODING tag to YES
+# forces the Windows encoding (this is the default for the Windows binary),
+# whereas setting the tag to NO uses a Unix-style encoding (the default for
+# all platforms other than Windows).
+
+USE_WINDOWS_ENCODING = NO
+
+# If the BRIEF_MEMBER_DESC tag is set to YES (the default) Doxygen will
+# include brief member descriptions after the members that are listed in
+# the file and class documentation (similar to JavaDoc).
+# Set to NO to disable this.
+
+BRIEF_MEMBER_DESC = YES
+
+# If the REPEAT_BRIEF tag is set to YES (the default) Doxygen will prepend
+# the brief description of a member or function before the detailed description.
+# Note: if both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the
+# brief descriptions will be completely suppressed.
+
+REPEAT_BRIEF = YES
+
+# This tag implements a quasi-intelligent brief description abbreviator
+# that is used to form the text in various listings. Each string
+# in this list, if found as the leading text of the brief description, will be
+# stripped from the text and the result after processing the whole list, is used
+# as the annotated text. Otherwise, the brief description is used as-is. If left
+# blank, the following values are used ("$name" is automatically replaced with the
+# name of the entity): "The $name class" "The $name widget" "The $name file"
+# "is" "provides" "specifies" "contains" "represents" "a" "an" "the"
+
+ABBREVIATE_BRIEF =
+
+# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then
+# Doxygen will generate a detailed section even if there is only a brief
+# description.
+
+ALWAYS_DETAILED_SEC = NO
+
+# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all inherited
+# members of a class in the documentation of that class as if those members were
+# ordinary class members. Constructors, destructors and assignment operators of
+# the base classes will not be shown.
+
+INLINE_INHERITED_MEMB = NO
+
+# If the FULL_PATH_NAMES tag is set to YES then Doxygen will prepend the full
+# path before files name in the file list and in the header files. If set
+# to NO the shortest path that makes the file name unique will be used.
+
+FULL_PATH_NAMES = NO
+
+# If the FULL_PATH_NAMES tag is set to YES then the STRIP_FROM_PATH tag
+# can be used to strip a user-defined part of the path. Stripping is
+# only done if one of the specified strings matches the left-hand part of
+# the path. The tag can be used to show relative paths in the file list.
+# If left blank the directory from which doxygen is run is used as the
+# path to strip.
+
+STRIP_FROM_PATH =
+
+# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of
+# the path mentioned in the documentation of a class, which tells
+# the reader which header file to include in order to use a class.
+# If left blank only the name of the header file containing the class
+# definition is used. Otherwise one should specify the include paths that
+# are normally passed to the compiler using the -I flag.
+
+STRIP_FROM_INC_PATH =
+
+# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter
+# (but less readable) file names. This can be useful if your file system
+# doesn't support long names like on DOS, Mac, or CD-ROM.
+
+SHORT_NAMES = NO
+
+# If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen
+# will interpret the first line (until the first dot) of a JavaDoc-style
+# comment as the brief description. If set to NO, the JavaDoc
+# comments will behave just like the Qt-style comments (thus requiring an
+# explicit @brief command for a brief description).
+
+JAVADOC_AUTOBRIEF = YES
+
+# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make Doxygen
+# treat a multi-line C++ special comment block (i.e. a block of //! or ///
+# comments) as a brief description. This used to be the default behaviour.
+# The new default is to treat a multi-line C++ comment block as a detailed
+# description. Set this tag to YES if you prefer the old behaviour instead.
+
+MULTILINE_CPP_IS_BRIEF = NO
+
+# If the DETAILS_AT_TOP tag is set to YES then Doxygen
+# will output the detailed description near the top, like JavaDoc.
+# If set to NO, the detailed description appears after the member
+# documentation.
+
+DETAILS_AT_TOP = YES
+
+# If the INHERIT_DOCS tag is set to YES (the default) then an undocumented
+# member inherits the documentation from any documented member that it
+# re-implements.
+
+INHERIT_DOCS = YES
+
+# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC
+# tag is set to YES, then doxygen will reuse the documentation of the first
+# member in the group (if any) for the other members of the group. By default
+# all members of a group must be documented explicitly.
+
+DISTRIBUTE_GROUP_DOC = NO
+
+# The TAB_SIZE tag can be used to set the number of spaces in a tab.
+# Doxygen uses this value to replace tabs by spaces in code fragments.
+
+TAB_SIZE = 8
+
+# This tag can be used to specify a number of aliases that acts
+# as commands in the documentation. An alias has the form "name=value".
+# For example adding "sideeffect=\par Side Effects:\n" will allow you to
+# put the command \sideeffect (or @sideeffect) in the documentation, which
+# will result in a user-defined paragraph with heading "Side Effects:".
+# You can put \n's in the value part of an alias to insert newlines.
+
+ALIASES =
+
+# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C sources
+# only. Doxygen will then generate output that is more tailored for C.
+# For instance, some of the names that are used will be different. The list
+# of all members will be omitted, etc.
+
+OPTIMIZE_OUTPUT_FOR_C = YES
+
+# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java sources
+# only. Doxygen will then generate output that is more tailored for Java.
+# For instance, namespaces will be presented as packages, qualified scopes
+# will look different, etc.
+
+OPTIMIZE_OUTPUT_JAVA = NO
+
+# Set the SUBGROUPING tag to YES (the default) to allow class member groups of
+# the same type (for instance a group of public functions) to be put as a
+# subgroup of that type (e.g. under the Public Functions section). Set it to
+# NO to prevent subgrouping. Alternatively, this can be done per class using
+# the \nosubgrouping command.
+
+SUBGROUPING = YES
+
+#---------------------------------------------------------------------------
+# Build related configuration options
+#---------------------------------------------------------------------------
+
+# If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in
+# documentation are documented, even if no documentation was available.
+# Private class members and static file members will be hidden unless
+# the EXTRACT_PRIVATE and EXTRACT_STATIC tags are set to YES
+
+EXTRACT_ALL = NO
+
+# If the EXTRACT_PRIVATE tag is set to YES all private members of a class
+# will be included in the documentation.
+
+EXTRACT_PRIVATE = YES
+
+# If the EXTRACT_STATIC tag is set to YES all static members of a file
+# will be included in the documentation.
+
+EXTRACT_STATIC = YES
+
+# If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs)
+# defined locally in source files will be included in the documentation.
+# If set to NO only classes defined in header files are included.
+
+EXTRACT_LOCAL_CLASSES = YES
+
+# This flag is only useful for Objective-C code. When set to YES local
+# methods, which are defined in the implementation section but not in
+# the interface are included in the documentation.
+# If set to NO (the default) only methods in the interface are included.
+
+EXTRACT_LOCAL_METHODS = NO
+
+# If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all
+# undocumented members of documented classes, files or namespaces.
+# If set to NO (the default) these members will be included in the
+# various overviews, but no documentation section is generated.
+# This option has no effect if EXTRACT_ALL is enabled.
+
+HIDE_UNDOC_MEMBERS = NO
+
+# If the HIDE_UNDOC_CLASSES tag is set to YES, Doxygen will hide all
+# undocumented classes that are normally visible in the class hierarchy.
+# If set to NO (the default) these classes will be included in the various
+# overviews. This option has no effect if EXTRACT_ALL is enabled.
+
+HIDE_UNDOC_CLASSES = NO
+
+# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, Doxygen will hide all
+# friend (class|struct|union) declarations.
+# If set to NO (the default) these declarations will be included in the
+# documentation.
+
+HIDE_FRIEND_COMPOUNDS = NO
+
+# If the HIDE_IN_BODY_DOCS tag is set to YES, Doxygen will hide any
+# documentation blocks found inside the body of a function.
+# If set to NO (the default) these blocks will be appended to the
+# function's detailed documentation block.
+
+HIDE_IN_BODY_DOCS = NO
+
+# The INTERNAL_DOCS tag determines if documentation
+# that is typed after a \internal command is included. If the tag is set
+# to NO (the default) then the documentation will be excluded.
+# Set it to YES to include the internal documentation.
+
+INTERNAL_DOCS = NO
+
+# If the CASE_SENSE_NAMES tag is set to NO then Doxygen will only generate
+# file names in lower-case letters. If set to YES upper-case letters are also
+# allowed. This is useful if you have classes or files whose names only differ
+# in case and if your file system supports case sensitive file names. Windows
+# and Mac users are advised to set this option to NO.
+
+CASE_SENSE_NAMES = YES
+
+# If the HIDE_SCOPE_NAMES tag is set to NO (the default) then Doxygen
+# will show members with their full class and namespace scopes in the
+# documentation. If set to YES the scope will be hidden.
+
+HIDE_SCOPE_NAMES = NO
+
+# If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen
+# will put a list of the files that are included by a file in the documentation
+# of that file.
+
+SHOW_INCLUDE_FILES = NO
+
+# If the INLINE_INFO tag is set to YES (the default) then a tag [inline]
+# is inserted in the documentation for inline members.
+
+INLINE_INFO = YES
+
+# If the SORT_MEMBER_DOCS tag is set to YES (the default) then doxygen
+# will sort the (detailed) documentation of file and class members
+# alphabetically by member name. If set to NO the members will appear in
+# declaration order.
+
+SORT_MEMBER_DOCS = NO
+
+# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the
+# brief documentation of file, namespace and class members alphabetically
+# by member name. If set to NO (the default) the members will appear in
+# declaration order.
+
+SORT_BRIEF_DOCS = NO
+
+# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be
+# sorted by fully-qualified names, including namespaces. If set to
+# NO (the default), the class list will be sorted only by class name,
+# not including the namespace part.
+# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES.
+# Note: This option applies only to the class list, not to the
+# alphabetical list.
+
+SORT_BY_SCOPE_NAME = NO
+
+# The GENERATE_TODOLIST tag can be used to enable (YES) or
+# disable (NO) the todo list. This list is created by putting \todo
+# commands in the documentation.
+
+GENERATE_TODOLIST = YES
+
+# The GENERATE_TESTLIST tag can be used to enable (YES) or
+# disable (NO) the test list. This list is created by putting \test
+# commands in the documentation.
+
+GENERATE_TESTLIST = YES
+
+# The GENERATE_BUGLIST tag can be used to enable (YES) or
+# disable (NO) the bug list. This list is created by putting \bug
+# commands in the documentation.
+
+GENERATE_BUGLIST = YES
+
+# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or
+# disable (NO) the deprecated list. This list is created by putting
+# \deprecated commands in the documentation.
+
+GENERATE_DEPRECATEDLIST= YES
+
+# The ENABLED_SECTIONS tag can be used to enable conditional
+# documentation sections, marked by \if sectionname ... \endif.
+
+ENABLED_SECTIONS =
+
+# The MAX_INITIALIZER_LINES tag determines the maximum number of lines
+# the initial value of a variable or define consists of for it to appear in
+# the documentation. If the initializer consists of more lines than specified
+# here it will be hidden. Use a value of 0 to hide initializers completely.
+# The appearance of the initializer of individual variables and defines in the
+# documentation can be controlled using \showinitializer or \hideinitializer
+# command in the documentation regardless of this setting.
+
+MAX_INITIALIZER_LINES = 30
+
+# Set the SHOW_USED_FILES tag to NO to disable the list of files generated
+# at the bottom of the documentation of classes and structs. If set to YES the
+# list will mention the files that were used to generate the documentation.
+
+SHOW_USED_FILES = YES
+
+#---------------------------------------------------------------------------
+# configuration options related to warning and progress messages
+#---------------------------------------------------------------------------
+
+# The QUIET tag can be used to turn on/off the messages that are generated
+# by doxygen. Possible values are YES and NO. If left blank NO is used.
+
+QUIET = YES
+
+# The WARNINGS tag can be used to turn on/off the warning messages that are
+# generated by doxygen. Possible values are YES and NO. If left blank
+# NO is used.
+
+WARNINGS = YES
+
+# If WARN_IF_UNDOCUMENTED is set to YES, then doxygen will generate warnings
+# for undocumented members. If EXTRACT_ALL is set to YES then this flag will
+# automatically be disabled.
+
+WARN_IF_UNDOCUMENTED = NO
+
+# If WARN_IF_DOC_ERROR is set to YES, doxygen will generate warnings for
+# potential errors in the documentation, such as not documenting some
+# parameters in a documented function, or documenting parameters that
+# don't exist or using markup commands wrongly.
+
+WARN_IF_DOC_ERROR = YES
+
+# The WARN_FORMAT tag determines the format of the warning messages that
+# doxygen can produce. The string should contain the $file, $line, and $text
+# tags, which will be replaced by the file and line number from which the
+# warning originated and the warning text.
+
+WARN_FORMAT = "$file:$line: $text"
+
+# The WARN_LOGFILE tag can be used to specify a file to which warning
+# and error messages should be written. If left blank the output is written
+# to stderr.
+
+WARN_LOGFILE =
+
+#---------------------------------------------------------------------------
+# configuration options related to the input files
+#---------------------------------------------------------------------------
+
+# The INPUT tag can be used to specify the files and/or directories that contain
+# documented source files. You may enter file names like "myfile.cpp" or
+# directories like "/usr/src/myproject". Separate the files or directories
+# with spaces.
+
+INPUT = . \
+        ../shared-core
+
+# If the value of the INPUT tag contains directories, you can use the
+# FILE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp
+# and *.h) to filter out the source-files in the directories. If left
+# blank the following patterns are tested:
+# *.c *.cc *.cxx *.cpp *.c++ *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh *.hxx *.hpp
+# *.h++ *.idl *.odl *.cs *.php *.php3 *.inc *.m *.mm
+
+FILE_PATTERNS = *.c \
+                *.h
+
+# The RECURSIVE tag can be used to specify whether or not subdirectories
+# should be searched for input files as well. Possible values are YES and NO.
+# If left blank NO is used.
+
+RECURSIVE = NO
+
+# The EXCLUDE tag can be used to specify files and/or directories that should
+# be excluded from the INPUT source files. This way you can easily exclude a
+# subdirectory from a directory tree whose root is specified with the INPUT tag.
+
+EXCLUDE =
+
+# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or directories
+# that are symbolic links (a Unix filesystem feature) are excluded from the input.
+
+EXCLUDE_SYMLINKS = YES
+
+# If the value of the INPUT tag contains directories, you can use the
+# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude
+# certain files from those directories.
+
+EXCLUDE_PATTERNS =
+
+# The EXAMPLE_PATH tag can be used to specify one or more files or
+# directories that contain example code fragments that are included (see
+# the \include command).
+
+EXAMPLE_PATH =
+
+# If the value of the EXAMPLE_PATH tag contains directories, you can use the
+# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp
+# and *.h) to filter out the source-files in the directories. If left
+# blank all files are included.
+
+EXAMPLE_PATTERNS =
+
+# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be
+# searched for input files to be used with the \include or \dontinclude
+# commands irrespective of the value of the RECURSIVE tag.
+# Possible values are YES and NO. If left blank NO is used.
+
+EXAMPLE_RECURSIVE = NO
+
+# The IMAGE_PATH tag can be used to specify one or more files or
+# directories that contain images that are included in the documentation (see
+# the \image command).
+
+IMAGE_PATH =
+
+# The INPUT_FILTER tag can be used to specify a program that doxygen should
+# invoke to filter for each input file. Doxygen will invoke the filter program
+# by executing (via popen()) the command <filter> <input-file>, where <filter>
+# is the value of the INPUT_FILTER tag, and <input-file> is the name of an
+# input file. Doxygen will then use the output that the filter program writes
+# to standard output. If FILTER_PATTERNS is specified, this tag will be
+# ignored.
+
+INPUT_FILTER =
+
+# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern
+# basis. Doxygen will compare the file name with each pattern and apply the
+# filter if there is a match. The filters are a list of the form:
+# pattern=filter (like *.cpp=my_cpp_filter). See INPUT_FILTER for further
+# info on how filters are used. If FILTER_PATTERNS is empty, INPUT_FILTER
+# is applied to all files.
+
+FILTER_PATTERNS =
+
+# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using
+# INPUT_FILTER) will be used to filter the input files when producing source
+# files to browse (i.e. when SOURCE_BROWSER is set to YES).
+
+FILTER_SOURCE_FILES = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to source browsing
+#---------------------------------------------------------------------------
+
+# If the SOURCE_BROWSER tag is set to YES then a list of source files will
+# be generated. Documented entities will be cross-referenced with these sources.
+# Note: To get rid of all source code in the generated output, make sure also
+# VERBATIM_HEADERS is set to NO.
+
+SOURCE_BROWSER = NO
+
+# Setting the INLINE_SOURCES tag to YES will include the body
+# of functions and classes directly in the documentation.
+
+INLINE_SOURCES = NO
+
+# Setting the STRIP_CODE_COMMENTS tag to YES (the default) will instruct
+# doxygen to hide any special comment blocks from generated source code
+# fragments. Normal C and C++ comments will always remain visible.
+
+STRIP_CODE_COMMENTS = YES
+
+# If the REFERENCED_BY_RELATION tag is set to YES (the default)
+# then for each documented function all documented
+# functions referencing it will be listed.
+
+REFERENCED_BY_RELATION = YES
+
+# If the REFERENCES_RELATION tag is set to YES (the default)
+# then for each documented function all documented entities
+# called/used by that function will be listed.
+
+REFERENCES_RELATION = YES
+
+# If the VERBATIM_HEADERS tag is set to YES (the default) then Doxygen
+# will generate a verbatim copy of the header file for each class for
+# which an include is specified. Set to NO to disable this.
+
+VERBATIM_HEADERS = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to the alphabetical class index
+#---------------------------------------------------------------------------
+
+# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index
+# of all compounds will be generated. Enable this if the project
+# contains a lot of classes, structs, unions or interfaces.
+
+ALPHABETICAL_INDEX = NO
+
+# If the alphabetical index is enabled (see ALPHABETICAL_INDEX) then
+# the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns
+# in which this list will be split (can be a number in the range [1..20])
+
+COLS_IN_ALPHA_INDEX = 5
+
+# In case all classes in a project start with a common prefix, all
+# classes will be put under the same header in the alphabetical index.
+# The IGNORE_PREFIX tag can be used to specify one or more prefixes that
+# should be ignored while generating the index headers.
+
+IGNORE_PREFIX =
+
+#---------------------------------------------------------------------------
+# configuration options related to the HTML output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_HTML tag is set to YES (the default) Doxygen will
+# generate HTML output.
+
+GENERATE_HTML = YES
+
+# The HTML_OUTPUT tag is used to specify where the HTML docs will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `html' will be used as the default path.
+
+HTML_OUTPUT = html
+
+# The HTML_FILE_EXTENSION tag can be used to specify the file extension for
+# each generated HTML page (for example: .htm,.php,.asp). If it is left blank
+# doxygen will generate files with .html extension.
+
+HTML_FILE_EXTENSION = .html
+
+# The HTML_HEADER tag can be used to specify a personal HTML header for
+# each generated HTML page. If it is left blank doxygen will generate a
+# standard header.
+
+HTML_HEADER =
+
+# The HTML_FOOTER tag can be used to specify a personal HTML footer for
+# each generated HTML page. If it is left blank doxygen will generate a
+# standard footer.
+
+HTML_FOOTER =
+
+# The HTML_STYLESHEET tag can be used to specify a user-defined cascading
+# style sheet that is used by each HTML page. It can be used to
+# fine-tune the look of the HTML output. If the tag is left blank doxygen
+# will generate a default style sheet. Note that doxygen will try to copy
+# the style sheet file to the HTML output directory, so don't put your own
+# stylesheet in the HTML output directory as well, or it will be erased!
+
+HTML_STYLESHEET =
+
+# If the HTML_ALIGN_MEMBERS tag is set to YES, the members of classes,
+# files or namespaces will be aligned in HTML using tables. If set to
+# NO a bullet list will be used.
+
+HTML_ALIGN_MEMBERS = YES
+
+# If the GENERATE_HTMLHELP tag is set to YES, additional index files
+# will be generated that can be used as input for tools like the
+# Microsoft HTML help workshop to generate a compressed HTML help file (.chm)
+# of the generated HTML documentation.
+
+GENERATE_HTMLHELP = NO
+
+# If the GENERATE_HTMLHELP tag is set to YES, the CHM_FILE tag can
+# be used to specify the file name of the resulting .chm file. You
+# can add a path in front of the file if the result should not be
+# written to the html output directory.
+
+CHM_FILE =
+
+# If the GENERATE_HTMLHELP tag is set to YES, the HHC_LOCATION tag can
+# be used to specify the location (absolute path including file name) of
+# the HTML help compiler (hhc.exe). If non-empty doxygen will try to run
+# the HTML help compiler on the generated index.hhp.
+
+HHC_LOCATION =
+
+# If the GENERATE_HTMLHELP tag is set to YES, the GENERATE_CHI flag
+# controls if a separate .chi index file is generated (YES) or that
+# it should be included in the master .chm file (NO).
+
+GENERATE_CHI = NO
+
+# If the GENERATE_HTMLHELP tag is set to YES, the BINARY_TOC flag
+# controls whether a binary table of contents is generated (YES) or a
+# normal table of contents (NO) in the .chm file.
+
+BINARY_TOC = NO
+
+# The TOC_EXPAND flag can be set to YES to add extra items for group members
+# to the contents of the HTML help documentation and to the tree view.
+
+TOC_EXPAND = NO
+
+# The DISABLE_INDEX tag can be used to turn on/off the condensed index at
+# top of each HTML page. The value NO (the default) enables the index and
+# the value YES disables it.
+
+DISABLE_INDEX = NO
+
+# This tag can be used to set the number of enum values (range [1..20])
+# that doxygen will group on one line in the generated HTML documentation.
+
+ENUM_VALUES_PER_LINE = 4
+
+# If the GENERATE_TREEVIEW tag is set to YES, a side panel will be
+# generated containing a tree-like index structure (just like the one that
+# is generated for HTML Help). For this to work a browser that supports
+# JavaScript, DHTML, CSS and frames is required (for instance Mozilla 1.0+,
+# Netscape 6.0+, Internet explorer 5.0+, or Konqueror). Windows users are
+# probably better off using the HTML help feature.
+
+GENERATE_TREEVIEW = NO
+
+# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be
+# used to set the initial width (in pixels) of the frame in which the tree
+# is shown.
+
+TREEVIEW_WIDTH = 250
+
+#---------------------------------------------------------------------------
+# configuration options related to the LaTeX output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_LATEX tag is set to YES (the default) Doxygen will
+# generate Latex output.
+
+GENERATE_LATEX = NO
+
+# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `latex' will be used as the default path.
+
+LATEX_OUTPUT = latex
+
+# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be
+# invoked. If left blank `latex' will be used as the default command name.
+
+LATEX_CMD_NAME = latex
+
+# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to
+# generate index for LaTeX. If left blank `makeindex' will be used as the
+# default command name.
+
+MAKEINDEX_CMD_NAME = makeindex
+
+# If the COMPACT_LATEX tag is set to YES Doxygen generates more compact
+# LaTeX documents. This may be useful for small projects and may help to
+# save some trees in general.
+
+COMPACT_LATEX = NO
+
+# The PAPER_TYPE tag can be used to set the paper type that is used
+# by the printer. Possible values are: a4, a4wide, letter, legal and
+# executive. If left blank a4wide will be used.
+
+PAPER_TYPE = a4wide
+
+# The EXTRA_PACKAGES tag can be used to specify one or more names of LaTeX
+# packages that should be included in the LaTeX output.
+
+EXTRA_PACKAGES =
+
+# The LATEX_HEADER tag can be used to specify a personal LaTeX header for
+# the generated latex document. The header should contain everything until
+# the first chapter. If it is left blank doxygen will generate a
+# standard header. Notice: only use this tag if you know what you are doing!
+
+LATEX_HEADER =
+
+# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated
+# is prepared for conversion to pdf (using ps2pdf). The pdf file will
+# contain links (just like the HTML output) instead of page references
+# This makes the output suitable for online browsing using a pdf viewer.
+
+PDF_HYPERLINKS = NO
+
+# If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of
+# plain latex in the generated Makefile. Set this option to YES to get a
+# higher quality PDF documentation.
+
+USE_PDFLATEX = NO
+
+# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode.
+# command to the generated LaTeX files. This will instruct LaTeX to keep
+# running if errors occur, instead of asking the user for help.
+# This option is also used when generating formulas in HTML.
+
+LATEX_BATCHMODE = NO
+
+# If LATEX_HIDE_INDICES is set to YES then doxygen will not
+# include the index chapters (such as File Index, Compound Index, etc.)
+# in the output.
+
+LATEX_HIDE_INDICES = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to the RTF output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_RTF tag is set to YES Doxygen will generate RTF output
+# The RTF output is optimized for Word 97 and may not look very pretty with
+# other RTF readers or editors.
+
+GENERATE_RTF = NO
+
+# The RTF_OUTPUT tag is used to specify where the RTF docs will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `rtf' will be used as the default path.
+
+RTF_OUTPUT = rtf
+
+# If the COMPACT_RTF tag is set to YES Doxygen generates more compact
+# RTF documents. This may be useful for small projects and may help to
+# save some trees in general.
+
+COMPACT_RTF = NO
+
+# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated
+# will contain hyperlink fields. The RTF file will
+# contain links (just like the HTML output) instead of page references.
+# This makes the output suitable for online browsing using WORD or other
+# programs which support those fields.
+# Note: wordpad (write) and others do not support links.
+
+RTF_HYPERLINKS = NO
+
+# Load stylesheet definitions from file. Syntax is similar to doxygen's
+# config file, i.e. a series of assignments. You only have to provide
+# replacements, missing definitions are set to their default value.
+
+RTF_STYLESHEET_FILE =
+
+# Set optional variables used in the generation of an rtf document.
+# Syntax is similar to doxygen's config file.
+
+RTF_EXTENSIONS_FILE =
+
+#---------------------------------------------------------------------------
+# configuration options related to the man page output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_MAN tag is set to YES (the default) Doxygen will
+# generate man pages
+
+GENERATE_MAN = NO
+
+# The MAN_OUTPUT tag is used to specify where the man pages will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `man' will be used as the default path.
+
+MAN_OUTPUT = man
+
+# The MAN_EXTENSION tag determines the extension that is added to
+# the generated man pages (default is the subroutine's section .3)
+
+MAN_EXTENSION = .3
+
+# If the MAN_LINKS tag is set to YES and Doxygen generates man output,
+# then it will generate one additional man file for each entity
+# documented in the real man page(s). These additional files
+# only source the real man page, but without them the man command
+# would be unable to find the correct page. The default is NO.
+
+MAN_LINKS = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to the XML output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_XML tag is set to YES Doxygen will
+# generate an XML file that captures the structure of
+# the code including all documentation.
+
+GENERATE_XML = NO
+
+# The XML_OUTPUT tag is used to specify where the XML pages will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `xml' will be used as the default path.
+
+XML_OUTPUT = xml
+
+# The XML_SCHEMA tag can be used to specify an XML schema,
+# which can be used by a validating XML parser to check the
+# syntax of the XML files.
+
+XML_SCHEMA =
+
+# The XML_DTD tag can be used to specify an XML DTD,
+# which can be used by a validating XML parser to check the
+# syntax of the XML files.
+
+XML_DTD =
+
+# If the XML_PROGRAMLISTING tag is set to YES Doxygen will
+# dump the program listings (including syntax highlighting
+# and cross-referencing information) to the XML output. Note that
+# enabling this will significantly increase the size of the XML output.
+
+XML_PROGRAMLISTING = YES
+
+#---------------------------------------------------------------------------
+# configuration options for the AutoGen Definitions output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_AUTOGEN_DEF tag is set to YES Doxygen will
+# generate an AutoGen Definitions (see autogen.sf.net) file
+# that captures the structure of the code including all
+# documentation. Note that this feature is still experimental
+# and incomplete at the moment.
+
+GENERATE_AUTOGEN_DEF = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to the Perl module output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_PERLMOD tag is set to YES Doxygen will
+# generate a Perl module file that captures the structure of
+# the code including all documentation. Note that this
+# feature is still experimental and incomplete at the
+# moment.
+
+GENERATE_PERLMOD = NO
+
+# If the PERLMOD_LATEX tag is set to YES Doxygen will generate
+# the necessary Makefile rules, Perl scripts and LaTeX code to be able
+# to generate PDF and DVI output from the Perl module output.
+
+PERLMOD_LATEX = NO
+
+# If the PERLMOD_PRETTY tag is set to YES the Perl module output will be
+# nicely formatted so it can be parsed by a human reader. This is useful
+# if you want to understand what is going on. On the other hand, if this
+# tag is set to NO the size of the Perl module output will be much smaller
+# and Perl will parse it just the same.
+
+PERLMOD_PRETTY = YES
+
+# The names of the make variables in the generated doxyrules.make file
+# are prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX.
+# This is useful so different doxyrules.make files included by the same
+# Makefile don't overwrite each other's variables.
+
+PERLMOD_MAKEVAR_PREFIX =
+
+#---------------------------------------------------------------------------
+# Configuration options related to the preprocessor
+#---------------------------------------------------------------------------
+
+# If the ENABLE_PREPROCESSING tag is set to YES (the default) Doxygen will
+# evaluate all C-preprocessor directives found in the sources and include
+# files.
+
+ENABLE_PREPROCESSING = YES
+
+# If the MACRO_EXPANSION tag is set to YES Doxygen will expand all macro
+# names in the source code. If set to NO (the default) only conditional
+# compilation will be performed. Macro expansion can be done in a controlled
+# way by setting EXPAND_ONLY_PREDEF to YES.
+
+MACRO_EXPANSION = YES
+
+# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES
+# then the macro expansion is limited to the macros specified with the
+# PREDEFINED and EXPAND_AS_PREDEFINED tags.
+
+EXPAND_ONLY_PREDEF = YES
+
+# If the SEARCH_INCLUDES tag is set to YES (the default) the include files
+# in the INCLUDE_PATH (see below) will be searched if a #include is found.
+
+SEARCH_INCLUDES = YES
+
+# The INCLUDE_PATH tag can be used to specify one or more directories that
+# contain include files that are not input files but should be processed by
+# the preprocessor.
+
+INCLUDE_PATH =
+
+# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard
+# patterns (like *.h and *.hpp) to filter out the header-files in the
+# directories. If left blank, the patterns specified with FILE_PATTERNS will
+# be used.
+
+INCLUDE_FILE_PATTERNS =
+
+# The PREDEFINED tag can be used to specify one or more macro names that
+# are defined before the preprocessor is started (similar to the -D option of
+# gcc). The argument of the tag is a list of macros of the form: name
+# or name=definition (no spaces). If the definition and the = are
+# omitted =1 is assumed.
+
+PREDEFINED = __KERNEL__ \
+             DRM(x)=x \
+             __OS_HAS_AGP=1 \
+             __OS_HAS_MTRR=1
+
+# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then
+# this tag can be used to specify a list of macro names that should be expanded.
+# The macro definition that is found in the sources will be used.
+# Use the PREDEFINED tag if you want to use a different macro definition.
+
+EXPAND_AS_DEFINED = DRMFILE \
+                    DRM_IOCTL_ARGS \
+                    DRM_IRQ_ARGS \
+                    DRM_TASKQUEUE_ARGS
+
+# If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then
+# doxygen's preprocessor will remove all function-like macros that are alone
+# on a line, have an all uppercase name, and do not end with a semicolon. Such
+# function macros are typically used for boiler-plate code, and will confuse the
+# parser if not removed.
+
+SKIP_FUNCTION_MACROS = YES
+
+#---------------------------------------------------------------------------
+# Configuration::additions related to external references
+#---------------------------------------------------------------------------
+
+# The TAGFILES option can be used to specify one or more tagfiles.
+# Optionally an initial location of the external documentation
+# can be added for each tagfile. The format of a tag file without
+# this location is as follows:
+#   TAGFILES = file1 file2 ...
+# Adding location for the tag files is done as follows:
+#   TAGFILES = file1=loc1 "file2 = loc2" ...
+# where "loc1" and "loc2" can be relative or absolute paths or
+# URLs. If a location is present for each tag, the installdox tool
+# does not have to be run to correct the links.
+# Note that each tag file must have a unique name
+# (where the name does NOT include the path)
+# If a tag file is not located in the directory in which doxygen
+# is run, you must also specify the path to the tagfile here.
+
+TAGFILES =
+
+# When a file name is specified after GENERATE_TAGFILE, doxygen will create
+# a tag file that is based on the input files it reads.
+
+GENERATE_TAGFILE =
+
+# If the ALLEXTERNALS tag is set to YES all external classes will be listed
+# in the class index. If set to NO only the inherited external classes
+# will be listed.
+
+ALLEXTERNALS = NO
+
+# If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed
+# in the modules index. If set to NO, only the current project's groups will
+# be listed.
+
+EXTERNAL_GROUPS = YES
+
+# The PERL_PATH should be the absolute path and name of the perl script
+# interpreter (i.e. the result of `which perl').
+
+PERL_PATH = /usr/bin/perl
+
+#---------------------------------------------------------------------------
+# Configuration options related to the dot tool
+#---------------------------------------------------------------------------
+
+# If the CLASS_DIAGRAMS tag is set to YES (the default) Doxygen will
+# generate an inheritance diagram (in HTML, RTF and LaTeX) for classes with base or
+# super classes. Setting the tag to NO turns the diagrams off. Note that this
+# option is superseded by the HAVE_DOT option below. This is only a fallback. It is
+# recommended to install and use dot, since it yields more powerful graphs.
+
+CLASS_DIAGRAMS = YES
+
+# If set to YES, the inheritance and collaboration graphs will hide
+# inheritance and usage relations if the target is undocumented
+# or is not a class.
+
+HIDE_UNDOC_RELATIONS = YES
+
+# If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is
+# available from the path. This tool is part of Graphviz, a graph visualization
+# toolkit from AT&T and Lucent Bell Labs. The other options in this section
+# have no effect if this option is set to NO (the default)
+
+HAVE_DOT = NO
+
+# If the CLASS_GRAPH and HAVE_DOT tags are set to YES then doxygen
+# will generate a graph for each documented class showing the direct and
+# indirect inheritance relations. Setting this tag to YES will force
+# the CLASS_DIAGRAMS tag to NO.
+
+CLASS_GRAPH = YES
+
+# If the COLLABORATION_GRAPH and HAVE_DOT tags are set to YES then doxygen
+# will generate a graph for each documented class showing the direct and
+# indirect implementation dependencies (inheritance, containment, and
+# class references variables) of the class with other documented classes.
+
+COLLABORATION_GRAPH = YES
+
+# If the UML_LOOK tag is set to YES doxygen will generate inheritance and
+# collaboration diagrams in a style similar to the OMG's Unified Modeling
+# Language.
+
+UML_LOOK = NO
+
+# If set to YES, the inheritance and collaboration graphs will show the
+# relations between templates and their instances.
+
+TEMPLATE_RELATIONS = YES
+
+# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDE_GRAPH, and HAVE_DOT
+# tags are set to YES then doxygen will generate a graph for each documented
+# file showing the direct and indirect include dependencies of the file with
+# other documented files.
+
+INCLUDE_GRAPH = YES
+
+# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDED_BY_GRAPH, and
+# HAVE_DOT tags are set to YES then doxygen will generate a graph for each
+# documented header file showing the documented files that directly or
+# indirectly include this file.
+
+INCLUDED_BY_GRAPH = YES
+
+# If the CALL_GRAPH and HAVE_DOT tags are set to YES then doxygen will
+# generate a call dependency graph for every global function or class method.
+# Note that enabling this option will significantly increase the time of a run.
+# So in most cases it will be better to enable call graphs for selected
+# functions only using the \callgraph command.
+
+CALL_GRAPH = NO
+
+# If the GRAPHICAL_HIERARCHY and HAVE_DOT tags are set to YES then doxygen
+# will show a graphical hierarchy of all classes instead of a textual one.
+
+GRAPHICAL_HIERARCHY = YES
+
+# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images
+# generated by dot. Possible values are png, jpg, or gif
+# If left blank png will be used.
+
+DOT_IMAGE_FORMAT = png
+
+# The tag DOT_PATH can be used to specify the path where the dot tool can be
+# found. If left blank, it is assumed the dot tool can be found on the path.
+
+DOT_PATH =
+
+# The DOTFILE_DIRS tag can be used to specify one or more directories that
+# contain dot files that are included in the documentation (see the
+# \dotfile command).
+
+DOTFILE_DIRS =
+
+# The MAX_DOT_GRAPH_WIDTH tag can be used to set the maximum allowed width
+# (in pixels) of the graphs generated by dot. If a graph becomes larger than
+# this value, doxygen will try to truncate the graph, so that it fits within
+# the specified constraint. Beware that most browsers cannot cope with very
+# large images.
+
+MAX_DOT_GRAPH_WIDTH = 1024
+
+# The MAX_DOT_GRAPH_HEIGHT tag can be used to set the maximum allowed height
+# (in pixels) of the graphs generated by dot. If a graph becomes larger than
+# this value, doxygen will try to truncate the graph, so that it fits within
+# the specified constraint. Beware that most browsers cannot cope with very
+# large images.
+
+MAX_DOT_GRAPH_HEIGHT = 1024
+
+# The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the
+# graphs generated by dot. A depth value of 3 means that only nodes reachable
+# from the root by following a path via at most 3 edges will be shown. Nodes that
+# lay further from the root node will be omitted. Note that setting this option to
+# 1 or 2 may greatly reduce the computation time needed for large code bases. Also
+# note that a graph may be further truncated if the graph's image dimensions are
+# not sufficient to fit the graph (see MAX_DOT_GRAPH_WIDTH and MAX_DOT_GRAPH_HEIGHT).
+# If 0 is used for the depth value (the default), the graph is not depth-constrained.
+
+MAX_DOT_GRAPH_DEPTH = 0
+
+# If the GENERATE_LEGEND tag is set to YES (the default) Doxygen will
+# generate a legend page explaining the meaning of the various boxes and
+# arrows in the dot generated graphs.
+
+GENERATE_LEGEND = YES
+
+# If the DOT_CLEANUP tag is set to YES (the default) Doxygen will
+# remove the intermediate dot files that are used to generate
+# the various graphs.
+
+DOT_CLEANUP = YES
+
+#---------------------------------------------------------------------------
+# Configuration::additions related to the search engine
+#---------------------------------------------------------------------------
+
+# The SEARCHENGINE tag specifies whether or not a search engine should be
+# used. If set to NO the values of all tags below this one will be ignored.
+
+SEARCHENGINE = NO
diff --git a/nx-X11/extras/drm/linux-core/Kconfig b/nx-X11/extras/drm/linux-core/Kconfig
new file mode 100644
index 000000000..d2ca98f2d
--- /dev/null
+++ b/nx-X11/extras/drm/linux-core/Kconfig
@@ -0,0 +1,107 @@
+#
+# Drm device configuration
+#
+# This driver provides support for the
+# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
+#
+config DRM
+	bool "Direct Rendering Manager (XFree86 4.1.0 and higher DRI support)"
+	help
+	  Kernel-level support for the Direct Rendering Infrastructure (DRI)
+	  introduced in XFree86 4.0. If you say Y here, you need to select
+	  the module that's right for your graphics card from the list below.
+	  These modules provide support for synchronization, security, and
+	  DMA transfers. Please see <http://dri.sourceforge.net/> for more
+	  details. You should also select and configure AGP
+	  (/dev/agpgart) support.
+
+config DRM_TDFX
+	tristate "3dfx Banshee/Voodoo3+"
+	depends on DRM && PCI
+	help
+	  Choose this option if you have a 3dfx Banshee or Voodoo3 (or later)
+	  graphics card. If M is selected, the module will be called tdfx.
+
+config DRM_R128
+	tristate "ATI Rage 128"
+	depends on DRM && PCI
+	help
+	  Choose this option if you have an ATI Rage 128 graphics card. If M
+	  is selected, the module will be called r128. AGP support for
+	  this card is strongly suggested (unless you have a PCI version).
+
+config DRM_RADEON
+	tristate "ATI Radeon"
+	depends on DRM && PCI
+	help
+	  Choose this option if you have an ATI Radeon graphics card. There
+	  are both PCI and AGP versions. You don't need to choose this to
+	  run the Radeon in plain VGA mode. There is a product page at
+	  <http://www.ati.com/na/pages/products/pc/radeon32/index.html>.
+	  If M is selected, the module will be called radeon.
+
+config DRM_I810
+	tristate "Intel I810"
+	depends on DRM && AGP && AGP_INTEL
+	help
+	  Choose this option if you have an Intel I810 graphics card. If M is
+	  selected, the module will be called i810. AGP support is required
+	  for this driver to work.
+
+choice
+	prompt "Intel 830M, 845G, 852GM, 855GM, 865G"
+	depends on DRM && AGP && AGP_INTEL
+	optional
+
+config DRM_I830
+	tristate "i830 driver"
+	help
+	  Choose this option if you have a system that has Intel 830M, 845G,
+	  852GM, 855GM or 865G integrated graphics. If M is selected, the
+	  module will be called i830. AGP support is required for this driver
+	  to work. This driver will eventually be replaced by the i915 one.
+
+config DRM_I915
+	tristate "i915 driver"
+	help
+	  Choose this option if you have a system that has Intel 830M, 845G,
+	  852GM, 855GM, 865G or 915G integrated graphics. If M is selected, the
+	  module will be called i915. AGP support is required for this driver
+	  to work. This driver will eventually replace the I830 driver, when
+	  later releases of X start to use the new DDX and DRI.
+
+endchoice
+
+config DRM_MGA
+	tristate "Matrox g200/g400"
+	depends on DRM && (!X86_64 || BROKEN) && (!PPC || BROKEN)
+	help
+	  Choose this option if you have a Matrox G200, G400, G450 or G550
+	  graphics card. If M is selected, the module will be called mga.
+
+config DRM_SIS
+	tristate "SiS video cards"
+	depends on DRM
+	help
+	  Choose this option if you have a SiS 630 or compatible video
+	  chipset. If M is selected the module will be called sis.
+
+config DRM_VIA
+	tristate "Via unichrome video cards"
+	depends on DRM
+	help
+	  Choose this option if you have a Via unichrome or compatible video
+	  chipset. If M is selected the module will be called via.
+
+config DRM_MACH64
+	tristate "ATI Rage Pro (Mach64)"
+	depends on DRM && PCI
+	help
+	  Choose this option if you have an ATI Rage Pro (mach64 chipset)
+	  graphics card. Example cards include: 3D Rage Pro, Xpert 98,
+	  3D Rage LT Pro, 3D Rage XL/XC, and 3D Rage Mobility (P/M, M1).
+	  Cards earlier than ATI Rage Pro (e.g. Rage II) are not supported.
+	  If M is selected, the module will be called mach64. AGP support for
+	  this card is strongly suggested (unless you have a PCI version).
+
+
diff --git a/nx-X11/extras/drm/linux-core/Makefile b/nx-X11/extras/drm/linux-core/Makefile
new file mode 100644
index 000000000..ab3609ba9
--- /dev/null
+++ b/nx-X11/extras/drm/linux-core/Makefile
@@ -0,0 +1,404 @@
+# Makefile -- For the Direct Rendering Manager module (drm)
+#
+# Based on David Woodhouse's mtd build.
+#
+# Modified to handle the DRM requirements and builds on a wider range of
+# platforms in a flexible way by David Dawes. It's not clear, however,
+# that this approach is simpler than the old one.
+#
+# The purpose of this Makefile is to handle setting up everything
+# needed for an out-of-kernel source build. Makefile.kernel contains
+# everything required for in-kernel source builds. It is included into
+# this file, so none of that should be duplicated here.
+#
+# $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/Makefile.linux,v 1.40 2003/08/17 17:12:25 dawes Exp $
+#
+
+#
+# By default, the build is done against the running linux kernel source.
+# To build against a different kernel source tree, set LINUXDIR:
+#
+#   make LINUXDIR=/path/to/kernel/source
+
+#
+# To build only some modules, either set DRM_MODULES to the list of modules,
+# or specify the modules as targets:
+#
+#   make r128.o radeon.o
+#
+# or:
+#
+#   make DRM_MODULES="r128 radeon"
+#
+
+SHELL=/bin/sh
+
+.SUFFIXES:
+
+ifndef LINUXDIR
+RUNNING_REL := $(shell uname -r)
+
+LINUXDIR := $(shell if [ -e /lib/modules/$(RUNNING_REL)/source ]; then \
+	echo /lib/modules/$(RUNNING_REL)/source; \
+	else echo /lib/modules/$(RUNNING_REL)/build; fi)
+endif
+
+ifndef O
+O := $(LINUXDIR)
+endif
+
+MACHINE := $(shell uname -m)
+
+# Modules for all architectures
+MODULE_LIST := drm.o tdfx.o r128.o radeon.o mga.o sis.o savage.o via.o \
+	mach64.o nv.o
+
+# Modules only for ix86 architectures
+ifneq (,$(findstring 86,$(MACHINE)))
+ARCHX86 := 1
+MODULE_LIST += i830.o i810.o i915.o
+endif
+
+ifneq (,$(findstring sparc64,$(MACHINE)))
+ARCHSPARC64 := 1
+MODULE_LIST += ffb.o
+endif
+
+DRM_MODULES ?= $(MODULE_LIST)
+
+# These definitions are for handling dependencies in the out of kernel build.
+
+DRMSHARED = drm.h drm_sarea.h
+DRMHEADERS = drmP.h drm_compat.h drm_os_linux.h $(DRMSHARED)
+COREHEADERS = drm_core.h
+
+TDFXHEADERS = tdfx_drv.h $(DRMHEADERS)
+TDFXSHARED = tdfx_drv.h
+R128HEADERS = r128_drv.h r128_drm.h $(DRMHEADERS)
+R128SHARED = r128_drv.h r128_drm.h r128_cce.c r128_state.c r128_irq.c
+RADEONHEADERS = radeon_drv.h radeon_drm.h r300_reg.h $(DRMHEADERS)
+RADEONSHARED = radeon_drv.h radeon_drm.h radeon_cp.c radeon_irq.c \
+	radeon_mem.c radeon_state.c r300_cmdbuf.c r300_reg.h
+MGAHEADERS = mga_drv.h mga_drm.h mga_ucode.h $(DRMHEADERS)
+MGASHARED = mga_dma.c mga_drm.h mga_drv.h mga_irq.c mga_state.c \
+	mga_ucode.h mga_warp.c
+I810HEADERS = i810_drv.h i810_drm.h $(DRMHEADERS)
+I830HEADERS = i830_drv.h i830_drm.h $(DRMHEADERS)
+I915HEADERS = i915_drv.h i915_drm.h $(DRMHEADERS)
+I915SHARED = i915_drv.h i915_drm.h i915_irq.c i915_mem.c i915_dma.c
+SISHEADERS= sis_drv.h sis_drm.h $(DRMHEADERS)
+SISSHARED= sis_drv.h sis_drm.h sis_ds.c sis_ds.h sis_mm.c
+SAVAGEHEADERS= savage_drv.h savage_drm.h $(DRMHEADERS)
+SAVAGESHARED= savage_drv.h savage_drm.h savage_bci.c savage_state.c
+VIAHEADERS = via_drm.h via_drv.h via_mm.h via_ds.h \
+	via_3d_reg.h via_verifier.h $(DRMHEADERS)
+VIASHARED = via_drm.h via_drv.h via_mm.h via_ds.h \
+	via_3d_reg.h via_drv.c via_ds.c via_irq.c via_map.c \
+	via_mm.c via_dma.c via_verifier.c via_verifier.h via_video.c
+MACH64HEADERS = mach64_drv.h mach64_drm.h $(DRMHEADERS)
+MACH64SHARED = mach64_drv.h mach64_drm.h mach64_dma.c \
+	mach64_irq.c mach64_state.c
+NVHEADERS = nv_drv.h $(DRMHEADERS)
+NVSHARED = nv_drv.h
+FFBHEADERS = ffb_drv.h $(DRMHEADERS)
+
+SHAREDSRC = $(DRMSHARED) $(MGASHARED) $(R128SHARED) $(RADEONSHARED) \
+	$(SISSHARED) $(TDFXSHARED) $(VIASHARED) $(MACH64SHARED) \
+	$(I915SHARED) $(SAVAGESHARED) $(NVSHARED)
+
+PROGS = dristat drmstat
+
+CLEANFILES = *.o *.ko $(PROGS) .depend .*.flags .*.d .*.cmd *.mod.c linux drm_pciids.h .tmp_versions
+
+# VERSION is not defined from the initial invocation. It is defined when
+# this Makefile is invoked from the kernel's root Makefile.
+
+ifndef VERSION
+
+ifdef RUNNING_REL
+
+# SuSE has the version.h and autoconf.h headers for the current kernel
+# in /boot as /boot/vmlinuz.version.h and /boot/vmlinuz.autoconf.h.
+# Check these first to see if they match the running kernel.
+
+BOOTVERSION_PREFIX = /boot/vmlinuz.
+
+V := $(shell if [ -f $(BOOTVERSION_PREFIX)version.h ]; then \
+	grep UTS_RELEASE $(BOOTVERSION_PREFIX)version.h | \
+	cut -d' ' -f3; fi)
+
+ifeq ($(V),"$(RUNNING_REL)")
+HEADERFROMBOOT := 1
+GETCONFIG := MAKEFILES=$(shell pwd)/.config
+HAVECONFIG := y
+endif
+
+# On Red Hat we need to check if there is a .config file in the kernel
+# source directory. If there isn't, we need to check if there's a
+# matching file in the configs subdirectory.
+
+ifneq ($(HAVECONFIG),y)
+HAVECONFIG := $(shell if [ -e $(LINUXDIR)/.config ]; then echo y; fi)
+endif
+
+ifneq ($(HAVECONFIG),y)
+REL_BASE := $(shell echo $(RUNNING_REL) | sed 's/-.*//')
+REL_TYPE := $(shell echo $(RUNNING_REL) | sed 's/[0-9.-]//g')
+ifeq ($(REL_TYPE),)
+RHCONFIG := configs/kernel-$(REL_BASE)-$(MACHINE).config
+else
+RHCONFIG := configs/kernel-$(REL_BASE)-$(MACHINE)-$(REL_TYPE).config
+endif
+HAVECONFIG := $(shell if [ -e $(LINUXDIR)/$(RHCONFIG) ]; then echo y; fi)
+ifneq ($(HAVECONFIG),y)
+RHCONFIG :=
+endif
+endif
+
+ifneq ($(HAVECONFIG),y)
+ifneq ($(O),$(LINUXDIR))
+GETCONFIG += O=$(O)
+endif
+HAVECONFIG := $(shell if [ -e $(O)/.config ]; then echo y; fi)
+endif
+
+ifneq ($(HAVECONFIG),y)
+$(error Cannot find a kernel config file)
+endif
+
+endif
+
+CLEANCONFIG := $(shell if cmp -s $(LINUXDIR)/.config .config; then echo y; fi)
+ifeq ($(CLEANCONFIG),y)
+CLEANFILES += $(LINUXDIR)/.config .config $(LINUXDIR)/tmp_include_depends
+endif
+
+all: modules
+
+modules: includes
+	make -C $(LINUXDIR) $(GETCONFIG) SUBDIRS=`pwd` DRMSRCDIR=`pwd` modules
+
+ifeq ($(HEADERFROMBOOT),1)
+
+BOOTHEADERS = version.h autoconf.h
+BOOTCONFIG = .config
+
+CLEANFILES += $(BOOTHEADERS) $(BOOTCONFIG)
+
+includes:: $(BOOTHEADERS) $(BOOTCONFIG)
+
+version.h: $(BOOTVERSION_PREFIX)version.h
+	rm -f $@
+	ln -s $< $@
+
+autoconf.h: $(BOOTVERSION_PREFIX)autoconf.h
+	rm -f $@
+	ln -s $< $@
+
+.config: $(BOOTVERSION_PREFIX)config
+	rm -f $@
+	ln -s $< $@
+endif
+
+# This prepares an unused Red Hat kernel tree for the build.
+ifneq ($(RHCONFIG),)
+includes:: $(LINUXDIR)/.config $(LINUXDIR)/tmp_include_depends .config
+
+$(LINUXDIR)/.config: $(LINUXDIR)/$(RHCONFIG)
+	rm -f $@
+	ln -s $< $@
+
+.config: $(LINUXDIR)/$(RHCONFIG)
+	rm -f $@
+	ln -s $< $@
+
+$(LINUXDIR)/tmp_include_depends:
+	echo all: > $@
+endif
+
+# Make sure that the shared source files are linked into this directory.
+
+
+SHAREDDIR := ../shared-core
+
+HASSHARED := $(shell if [ -d $(SHAREDDIR) ]; then echo y; fi)
+
+ifeq ($(HASSHARED),y)
+includes:: $(SHAREDSRC) drm_pciids.h
+
+drm_pciids.h: $(SHAREDDIR)/drm_pciids.txt
+	sh ../scripts/create_linux_pci_lists.sh < $(SHAREDDIR)/drm_pciids.txt
+
+$(SHAREDSRC):
+	@if [ -r $(SHAREDDIR)/$@ ]; then \
+		(rm -f $@; set -x; ln -s $(SHAREDDIR)/$@ $@); fi
+
+CLEANFILES += $(SHAREDSRC)
+endif
+
+includes:: linux
+
+linux:
+	rm -f linux
+	ln -s . linux
+
+clean cleandir:
+	rm -rf $(CLEANFILES)
+
+$(MODULE_LIST)::
+	make DRM_MODULES=$@ modules
+
+# Build test utilities
+
+PRGCFLAGS = $(CFLAGS) -g -ansi -pedantic -DPOSIX_C_SOURCE=199309L \
+	    -D_POSIX_SOURCE -D_XOPEN_SOURCE -D_BSD_SOURCE -D_SVID_SOURCE \
+	    -I. -I../../..
+
+DRMSTATLIBS = -L../../.. -L.. -ldrm -lxf86_os \
+	      -L../../../../dummylib -ldummy -lm
+
+programs: $(PROGS)
+
+dristat: dristat.c
+	$(CC) $(PRGCFLAGS) $< -o $@
+
+drmstat: drmstat.c
+	$(CC) $(PRGCFLAGS) $< -o $@ $(DRMSTATLIBS)
+
+else
+
+# Check for kernel versions that we don't support.
+
+BELOW24 := $(shell if [ $(VERSION) -lt 2 -o $(PATCHLEVEL) -lt 4 ]; then \
+		echo y; fi)
+
+ifeq ($(BELOW24),y)
+$(error Only 2.4.x and later kernels are supported \
+	($(VERSION).$(PATCHLEVEL).$(SUBLEVEL)))
+endif
+
+ifdef ARCHX86
+ifndef CONFIG_X86_CMPXCHG
+$(error CONFIG_X86_CMPXCHG needs to be enabled in the kernel)
+endif
+endif
+
+# This needs to go before all other include paths.
+CC += -I$(DRMSRCDIR)
+
+# Check for Red Hat's 4-argument do_munmap().
+DOMUNMAP := $(shell grep do_munmap $(LINUXDIR)/include/linux/mm.h | \
+	grep -c acct)
+
+ifneq ($(DOMUNMAP),0)
+EXTRA_CFLAGS += -DDO_MUNMAP_4_ARGS
+endif
+
+# Check for 5-argument remap_page_range() in RH9 kernel, and 2.5.x kernels
+RPR := $(shell grep remap_page_range $(LINUXDIR)/include/linux/mm.h | \
+	grep -c vma)
+
+ifneq ($(RPR),0)
+EXTRA_CFLAGS += -DREMAP_PAGE_RANGE_5_ARGS
+endif
+
+# Check for 4-argument vmap() in some 2.5.x and 2.4.x kernels
+VMAP := $(shell grep -A1 'vmap.*count,$$' $(LINUXDIR)/include/linux/vmalloc.h | \
+	grep -c prot)
+
+ifneq ($(VMAP),0)
+EXTRA_CFLAGS += -DVMAP_4_ARGS
+endif
+
+# Check for PAGE_AGP definition
+PAGE_AGP := $(shell cat $(LINUXDIR)/include/asm/agp.h 2>/dev/null | \
+	grep -c PAGE_AGP)
+
+ifneq ($(PAGE_AGP),0)
+EXTRA_CFLAGS += -DHAVE_PAGE_AGP
+endif
+
+
+# Start with all modules turned off.
+CONFIG_DRM_GAMMA := n
+CONFIG_DRM_TDFX := n
+CONFIG_DRM_MGA := n
+CONFIG_DRM_I810 := n
+CONFIG_DRM_R128 := n
+CONFIG_DRM_RADEON := n
+CONFIG_DRM_I830 := n
+CONFIG_DRM_I915 := n
+CONFIG_DRM_SIS := n
+CONFIG_DRM_FFB := n
+CONFIG_DRM_SAVAGE := n
+CONFIG_DRM_VIA := n
+CONFIG_DRM_MACH64 := n
+CONFIG_DRM_NV := n
+
+# Enable module builds for the modules requested/supported.
+
+ifneq (,$(findstring tdfx,$(DRM_MODULES)))
+CONFIG_DRM_TDFX := m
+endif
+ifneq (,$(findstring r128,$(DRM_MODULES)))
+CONFIG_DRM_R128 := m
+endif
+ifneq (,$(findstring radeon,$(DRM_MODULES)))
+CONFIG_DRM_RADEON := m
+endif
+ifneq (,$(findstring sis,$(DRM_MODULES)))
+CONFIG_DRM_SIS := m
+endif
+ifneq (,$(findstring via,$(DRM_MODULES)))
+CONFIG_DRM_VIA := m
+endif
+ifneq (,$(findstring mach64,$(DRM_MODULES)))
+CONFIG_DRM_MACH64 := m
+endif
+ifneq (,$(findstring ffb,$(DRM_MODULES)))
+CONFIG_DRM_FFB := m
+endif
+ifneq (,$(findstring savage,$(DRM_MODULES)))
+CONFIG_DRM_SAVAGE := m
+endif
+ifneq (,$(findstring mga,$(DRM_MODULES)))
+CONFIG_DRM_MGA := m
+endif
+ifneq (,$(findstring nv,$(DRM_MODULES)))
+CONFIG_DRM_NV := m
+endif
+
+# These require AGP support
+
+ifdef CONFIG_AGP
+ifneq (,$(findstring i810,$(DRM_MODULES)))
+CONFIG_DRM_I810 := m
+endif
+ifneq (,$(findstring i830,$(DRM_MODULES)))
+CONFIG_DRM_I830 := m
+endif
+ifneq (,$(findstring i915,$(DRM_MODULES)))
+CONFIG_DRM_I915 := m
+endif
+endif
+
+include $(DRMSRCDIR)/Makefile.kernel
+
+# Dependencies
+$(drm-objs):	$(DRMHEADERS) $(COREHEADERS)
+$(tdfx-objs):	$(TDFXHEADERS)
+$(r128-objs):	$(R128HEADERS)
+$(mga-objs):	$(MGAHEADERS)
+$(i810-objs):	$(I810HEADERS)
+$(i830-objs):	$(I830HEADERS)
+$(i915-objs):	$(I915HEADERS)
+$(radeon-objs):	$(RADEONHEADERS)
+$(sis-objs):	$(SISHEADERS)
+$(ffb-objs):	$(FFBHEADERS)
+$(savage-objs):	$(SAVAGEHEADERS)
+$(via-objs):	$(VIAHEADERS)
+$(mach64-objs):	$(MACH64HEADERS)
+$(nv-objs):	$(NVHEADERS)
+
+endif
+
diff --git a/nx-X11/extras/drm/linux-core/Makefile.kernel b/nx-X11/extras/drm/linux-core/Makefile.kernel
new file mode 100644
index 000000000..d63aabb64
--- /dev/null
+++ b/nx-X11/extras/drm/linux-core/Makefile.kernel
@@ -0,0 +1,51 @@
+#
+# Makefile for the drm device driver.  This driver provides support for the
+# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
+#
+# Based on David Woodhouse's mtd build.
+# +# $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/Makefile.kernel,v 1.18 2003/08/16 17:59:17 dawes Exp $ +# + +drm-objs := drm_auth.o drm_bufs.o drm_context.o drm_dma.o drm_drawable.o \ + drm_drv.o drm_fops.o drm_ioctl.o drm_irq.o \ + drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \ + drm_sysfs.o drm_pci.o drm_agpsupport.o drm_scatter.o \ + drm_memory_debug.o ati_pcigart.o +tdfx-objs := tdfx_drv.o +r128-objs := r128_drv.o r128_cce.o r128_state.o r128_irq.o +mga-objs := mga_drv.o mga_dma.o mga_state.o mga_warp.o mga_irq.o +i810-objs := i810_drv.o i810_dma.o +i830-objs := i830_drv.o i830_dma.o i830_irq.o +i915-objs := i915_drv.o i915_dma.o i915_irq.o i915_mem.o +radeon-objs := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o radeon_irq.o r300_cmdbuf.o +sis-objs := sis_drv.o sis_ds.o sis_mm.o +ffb-objs := ffb_drv.o ffb_context.o +savage-objs := savage_drv.o savage_bci.o savage_state.o +via-objs := via_irq.o via_drv.o via_ds.o via_map.o via_mm.o via_dma.o via_verifier.o \ + via_video.o via_dmablit.o +mach64-objs := mach64_drv.o mach64_dma.o mach64_irq.o mach64_state.o +nv-objs := nv_drv.o + +ifeq ($(CONFIG_COMPAT),y) +drm-objs += drm_ioc32.o +radeon-objs += radeon_ioc32.o +mga-objs += mga_ioc32.o +r128-objs += r128_ioc32.o +i915-objs += i915_ioc32.o +endif + +obj-m += drm.o +obj-$(CONFIG_DRM_TDFX) += tdfx.o +obj-$(CONFIG_DRM_R128) += r128.o +obj-$(CONFIG_DRM_RADEON)+= radeon.o +obj-$(CONFIG_DRM_MGA) += mga.o +obj-$(CONFIG_DRM_I810) += i810.o +obj-$(CONFIG_DRM_I830) += i830.o +obj-$(CONFIG_DRM_I915) += i915.o +obj-$(CONFIG_DRM_SIS) += sis.o +obj-$(CONFIG_DRM_FFB) += ffb.o +obj-$(CONFIG_DRM_SAVAGE)+= savage.o +obj-$(CONFIG_DRM_VIA) += via.o +obj-$(CONFIG_DRM_MACH64)+= mach64.o +obj-$(CONFIG_DRM_NV) += nv.o diff --git a/nx-X11/extras/drm/linux-core/README.drm b/nx-X11/extras/drm/linux-core/README.drm new file mode 100644 index 000000000..6441e01e5 --- /dev/null +++ b/nx-X11/extras/drm/linux-core/README.drm @@ -0,0 +1,46 @@ +************************************************************ +* For the very latest on DRI development, please see: * +* http://dri.sourceforge.net/ * +************************************************************ + +The Direct Rendering Manager (drm) is a device-independent kernel-level +device driver that provides support for the XFree86 Direct Rendering +Infrastructure (DRI). + +The DRM supports the Direct Rendering Infrastructure (DRI) in four major +ways: + + 1. The DRM provides synchronized access to the graphics hardware via + the use of an optimized two-tiered lock. + + 2. The DRM enforces the DRI security policy for access to the graphics + hardware by only allowing authenticated X11 clients access to + restricted regions of memory. + + 3. The DRM provides a generic DMA engine, complete with multiple + queues and the ability to detect the need for an OpenGL context + switch. + + 4. The DRM is extensible via the use of small device-specific modules + that rely extensively on the API exported by the DRM module. 
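+
+As a rough illustration of point 1, every device-specific ioctl handler
+that touches the hardware is expected to verify that the caller holds the
+hardware lock before proceeding.  A minimal kernel-side sketch (the foo_*
+names are hypothetical; LOCK_TEST_WITH_RETURN comes from drmP.h in this
+directory):
+
+    int foo_flush_ioctl(struct inode *inode, struct file *filp,
+                        unsigned int cmd, unsigned long arg)
+    {
+        drm_file_t *priv = filp->private_data;
+        drm_device_t *dev = priv->head->dev;
+
+        /* Returns -EINVAL to the caller if the lock is not held. */
+        LOCK_TEST_WITH_RETURN(dev, filp);
+
+        /* ... device-specific hardware programming goes here ... */
+        return 0;
+    }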
+ + +Documentation on the DRI is available from: + http://precisioninsight.com/piinsights.html + +For specific information about kernel-level support, see: + + The Direct Rendering Manager, Kernel Support for the Direct Rendering + Infrastructure + http://precisioninsight.com/dr/drm.html + + Hardware Locking for the Direct Rendering Infrastructure + http://precisioninsight.com/dr/locking.html + + A Security Analysis of the Direct Rendering Infrastructure + http://precisioninsight.com/dr/security.html + +************************************************************ +* For the very latest on DRI development, please see: * +* http://dri.sourceforge.net/ * +************************************************************ diff --git a/nx-X11/extras/drm/linux-core/ati_pcigart.c b/nx-X11/extras/drm/linux-core/ati_pcigart.c new file mode 100644 index 000000000..404c5b73a --- /dev/null +++ b/nx-X11/extras/drm/linux-core/ati_pcigart.c @@ -0,0 +1,228 @@ +/** + * \file ati_pcigart.c + * ATI PCI GART support + * + * \author Gareth Hughes <gareth@valinux.com> + */ + +/* + * Created: Wed Dec 13 21:52:19 2000 by gareth@valinux.com + * + * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "drmP.h" + +#if PAGE_SIZE == 65536 +# define ATI_PCIGART_TABLE_ORDER 0 +# define ATI_PCIGART_TABLE_PAGES (1 << 0) +#elif PAGE_SIZE == 16384 +# define ATI_PCIGART_TABLE_ORDER 1 +# define ATI_PCIGART_TABLE_PAGES (1 << 1) +#elif PAGE_SIZE == 8192 +# define ATI_PCIGART_TABLE_ORDER 2 +# define ATI_PCIGART_TABLE_PAGES (1 << 2) +#elif PAGE_SIZE == 4096 +# define ATI_PCIGART_TABLE_ORDER 3 +# define ATI_PCIGART_TABLE_PAGES (1 << 3) +#else +# error - PAGE_SIZE not 64K, 16K, 8K or 4K +#endif + +# define ATI_MAX_PCIGART_PAGES 8192 /**< 32 MB aperture, 4K pages */ +# define ATI_PCIGART_PAGE_SIZE 4096 /**< PCI GART page size */ + +static void *drm_ati_alloc_pcigart_table(void) +{ + unsigned long address; + struct page *page; + int i; + DRM_DEBUG("%s\n", __FUNCTION__); + + address = __get_free_pages(GFP_KERNEL, ATI_PCIGART_TABLE_ORDER); + if (address == 0UL) { + return 0; + } + + page = virt_to_page(address); + + for (i = 0; i < ATI_PCIGART_TABLE_PAGES; i++, page++) { + get_page(page); + SetPageReserved(page); + } + + DRM_DEBUG("%s: returning 0x%08lx\n", __FUNCTION__, address); + return (void *)address; +} + +static void drm_ati_free_pcigart_table(void *address) +{ + struct page *page; + int i; + DRM_DEBUG("%s\n", __FUNCTION__); + + page = virt_to_page((unsigned long)address); + + for (i = 0; i < ATI_PCIGART_TABLE_PAGES; i++, page++) { + __put_page(page); + ClearPageReserved(page); + } + + free_pages((unsigned long)address, ATI_PCIGART_TABLE_ORDER); +} + +int drm_ati_pcigart_init(drm_device_t * dev, drm_ati_pcigart_info *gart_info) +{ + drm_sg_mem_t *entry = dev->sg; + void *address = NULL; + unsigned long pages; + u32 *pci_gart, page_base, bus_address = 0; + int i, j, ret = 0; + + if (!entry) { + DRM_ERROR("no scatter/gather memory!\n"); + goto done; + } + + if (gart_info->gart_table_location==DRM_ATI_GART_MAIN) + { + DRM_DEBUG("PCI: no table in VRAM: using normal RAM\n"); + + address = drm_ati_alloc_pcigart_table(); + if (!address) { + DRM_ERROR("cannot allocate PCI GART page!\n"); + goto done; + } + + if (!dev->pdev) { + DRM_ERROR("PCI device unknown!\n"); + goto done; + } + + bus_address = pci_map_single(dev->pdev, address, + ATI_PCIGART_TABLE_PAGES * PAGE_SIZE, + PCI_DMA_TODEVICE); + if (bus_address == 0) { + DRM_ERROR("unable to map PCIGART pages!\n"); + drm_ati_free_pcigart_table(address); + address = 0; + goto done; + } + } + else + { + address = gart_info->addr; + bus_address = gart_info->bus_addr; + DRM_DEBUG("PCI: Gart Table: VRAM %08X mapped at %08lX\n", bus_address, (unsigned long)address); + } + + pci_gart = (u32 *) address; + + pages = (entry->pages <= ATI_MAX_PCIGART_PAGES) + ? 
entry->pages : ATI_MAX_PCIGART_PAGES; + + memset(pci_gart, 0, ATI_MAX_PCIGART_PAGES * sizeof(u32)); + + for (i = 0; i < pages; i++) { + /* we need to support large memory configurations */ + entry->busaddr[i] = pci_map_single(dev->pdev, + page_address(entry-> + pagelist[i]), + PAGE_SIZE, PCI_DMA_TODEVICE); + if (entry->busaddr[i] == 0) { + DRM_ERROR("unable to map PCIGART pages!\n"); + drm_ati_pcigart_cleanup(dev, gart_info); + address = NULL; + bus_address = 0; + goto done; + } + page_base = (u32) entry->busaddr[i]; + + for (j = 0; j < (PAGE_SIZE / ATI_PCIGART_PAGE_SIZE); j++) { + if (gart_info->is_pcie) + *pci_gart = (cpu_to_le32(page_base)>>8) | 0xc; + else + *pci_gart = cpu_to_le32(page_base); + pci_gart++; + page_base += ATI_PCIGART_PAGE_SIZE; + } + } + + ret = 1; + +#if defined(__i386__) || defined(__x86_64__) + wbinvd(); +#else + mb(); +#endif + + done: + gart_info->addr = address; + gart_info->bus_addr = bus_address; + return ret; +} +EXPORT_SYMBOL(drm_ati_pcigart_init); + +int drm_ati_pcigart_cleanup(drm_device_t * dev, drm_ati_pcigart_info *gart_info) +{ + drm_sg_mem_t *entry = dev->sg; + unsigned long pages; + int i; + + /* we need to support large memory configurations */ + if (!entry) { + DRM_ERROR("no scatter/gather memory!\n"); + return 0; + } + + if (gart_info->bus_addr) { + if (gart_info->gart_table_location==DRM_ATI_GART_MAIN) { + pci_unmap_single(dev->pdev, gart_info->bus_addr, + ATI_PCIGART_TABLE_PAGES * PAGE_SIZE, + PCI_DMA_TODEVICE); + } + + pages = (entry->pages <= ATI_MAX_PCIGART_PAGES) + ? entry->pages : ATI_MAX_PCIGART_PAGES; + + for (i = 0; i < pages; i++) { + if (!entry->busaddr[i]) + break; + pci_unmap_single(dev->pdev, entry->busaddr[i], + PAGE_SIZE, PCI_DMA_TODEVICE); + } + + if (gart_info->gart_table_location==DRM_ATI_GART_MAIN) + gart_info->bus_addr=0; + } + + + if (gart_info->gart_table_location==DRM_ATI_GART_MAIN && gart_info->addr) { + drm_ati_free_pcigart_table(gart_info->addr); + gart_info->addr=0; + } + + return 1; +} +EXPORT_SYMBOL(drm_ati_pcigart_cleanup); + diff --git a/nx-X11/extras/drm/linux-core/drmP.h b/nx-X11/extras/drm/linux-core/drmP.h new file mode 100644 index 000000000..7b344c7ab --- /dev/null +++ b/nx-X11/extras/drm/linux-core/drmP.h @@ -0,0 +1,1080 @@ +/** + * \file drmP.h + * Private header for Direct Rendering Manager + * + * \author Rickard E. (Rik) Faith <faith@valinux.com> + * \author Gareth Hughes <gareth@valinux.com> + */ + +/* + * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. + * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. + * All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef _DRM_P_H_ +#define _DRM_P_H_ + +#ifdef __KERNEL__ +#ifdef __alpha__ +/* add include of current.h so that "current" is defined + * before static inline funcs in wait.h. Doing this so we + * can build the DRM (part of PI DRI). 4/21/2000 S + B */ +#include <asm/current.h> +#endif /* __alpha__ */ +#include <linux/config.h> +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/miscdevice.h> +#include <linux/fs.h> +#include <linux/proc_fs.h> +#include <linux/init.h> +#include <linux/file.h> +#include <linux/pci.h> +#include <linux/version.h> +#include <linux/sched.h> +#include <linux/smp_lock.h> /* For (un)lock_kernel */ +#include <linux/mm.h> +#include <linux/pagemap.h> +#if defined(__alpha__) || defined(__powerpc__) +#include <asm/pgtable.h> /* For pte_wrprotect */ +#endif +#include <asm/io.h> +#include <asm/mman.h> +#include <asm/uaccess.h> +#ifdef CONFIG_MTRR +#include <asm/mtrr.h> +#endif +#if defined(CONFIG_AGP) || defined(CONFIG_AGP_MODULE) +#include <linux/types.h> +#include <linux/agp_backend.h> +#endif +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,41) +#define HAS_WORKQUEUE 0 +#else +#define HAS_WORKQUEUE 1 +#endif +#if !HAS_WORKQUEUE +#include <linux/tqueue.h> +#else +#include <linux/workqueue.h> +#endif +#include <linux/poll.h> +#include <asm/pgalloc.h> +#include "drm.h" + +#define __OS_HAS_AGP (defined(CONFIG_AGP) || (defined(CONFIG_AGP_MODULE) && defined(MODULE))) +#define __OS_HAS_MTRR (defined(CONFIG_MTRR)) + +#include "drm_os_linux.h" + +/* If you want the memory alloc debug functionality, change define below */ +/* #define DEBUG_MEMORY */ + +/***********************************************************************/ +/** \name DRM template customization defaults */ +/*@{*/ + +/* driver capabilities and requirements mask */ +#define DRIVER_USE_AGP 0x1 +#define DRIVER_REQUIRE_AGP 0x2 +#define DRIVER_USE_MTRR 0x4 +#define DRIVER_PCI_DMA 0x8 +#define DRIVER_SG 0x10 +#define DRIVER_HAVE_DMA 0x20 +#define DRIVER_HAVE_IRQ 0x40 +#define DRIVER_IRQ_SHARED 0x80 +#define DRIVER_IRQ_VBL 0x100 +#define DRIVER_DMA_QUEUE 0x200 +#define DRIVER_FB_DMA 0x400 + + +/*@}*/ + +/***********************************************************************/ +/** \name Begin the DRM... */ +/*@{*/ + +#define DRM_DEBUG_CODE 2 /**< Include debugging code if > 1, then + also include looping detection. */ + +#define DRM_HASH_SIZE 16 /**< Size of key hash table. Must be power of 2. 
*/ +#define DRM_KERNEL_CONTEXT 0 /**< Change drm_resctx if changed */ +#define DRM_RESERVED_CONTEXTS 1 /**< Change drm_resctx if changed */ +#define DRM_LOOPING_LIMIT 5000000 +#define DRM_TIME_SLICE (HZ/20) /**< Time slice for GLXContexts */ +#define DRM_LOCK_SLICE 1 /**< Time slice for lock, in jiffies */ + +#define DRM_FLAG_DEBUG 0x01 + +#define DRM_MEM_DMA 0 +#define DRM_MEM_SAREA 1 +#define DRM_MEM_DRIVER 2 +#define DRM_MEM_MAGIC 3 +#define DRM_MEM_IOCTLS 4 +#define DRM_MEM_MAPS 5 +#define DRM_MEM_VMAS 6 +#define DRM_MEM_BUFS 7 +#define DRM_MEM_SEGS 8 +#define DRM_MEM_PAGES 9 +#define DRM_MEM_FILES 10 +#define DRM_MEM_QUEUES 11 +#define DRM_MEM_CMDS 12 +#define DRM_MEM_MAPPINGS 13 +#define DRM_MEM_BUFLISTS 14 +#define DRM_MEM_AGPLISTS 15 +#define DRM_MEM_TOTALAGP 16 +#define DRM_MEM_BOUNDAGP 17 +#define DRM_MEM_CTXBITMAP 18 +#define DRM_MEM_STUB 19 +#define DRM_MEM_SGLISTS 20 +#define DRM_MEM_CTXLIST 21 + +#define DRM_MAX_CTXBITMAP (PAGE_SIZE * 8) + +/*@}*/ + +#include "drm_compat.h" + +/***********************************************************************/ +/** \name Macros to make printk easier */ +/*@{*/ + +/** + * Error output. + * + * \param fmt printf() like format string. + * \param arg arguments + */ +#define DRM_ERROR(fmt, arg...) \ + printk(KERN_ERR "[" DRM_NAME ":%s] *ERROR* " fmt , __FUNCTION__ , ##arg) + +/** + * Memory error output. + * + * \param area memory area where the error occurred. + * \param fmt printf() like format string. + * \param arg arguments + */ +#define DRM_MEM_ERROR(area, fmt, arg...) \ + printk(KERN_ERR "[" DRM_NAME ":%s:%s] *ERROR* " fmt , __FUNCTION__, \ + drm_mem_stats[area].name , ##arg) +#define DRM_INFO(fmt, arg...) printk(KERN_INFO "[" DRM_NAME "] " fmt , ##arg) + +/** + * Debug output. + * + * \param fmt printf() like format string. + * \param arg arguments + */ +#if DRM_DEBUG_CODE +#define DRM_DEBUG(fmt, arg...) \ + do { \ + if ( drm_debug ) \ + printk(KERN_DEBUG \ + "[" DRM_NAME ":%s] " fmt , \ + __FUNCTION__ , ##arg); \ + } while (0) +#else +#define DRM_DEBUG(fmt, arg...) do { } while (0) +#endif + +#define DRM_PROC_LIMIT (PAGE_SIZE-80) + +#define DRM_PROC_PRINT(fmt, arg...) \ + len += sprintf(&buf[len], fmt , ##arg); \ + if (len > DRM_PROC_LIMIT) { *eof = 1; return len - offset; } + +#define DRM_PROC_PRINT_RET(ret, fmt, arg...) \ + len += sprintf(&buf[len], fmt , ##arg); \ + if (len > DRM_PROC_LIMIT) { ret; *eof = 1; return len - offset; } + +/*@}*/ + +/***********************************************************************/ +/** \name Internal types and structures */ +/*@{*/ + +#define DRM_ARRAY_SIZE(x) ARRAY_SIZE(x) +#define DRM_MIN(a,b) min(a,b) +#define DRM_MAX(a,b) max(a,b) + +#define DRM_LEFTCOUNT(x) (((x)->rp + (x)->count - (x)->wp) % ((x)->count + 1)) +#define DRM_BUFCOUNT(x) ((x)->count - DRM_LEFTCOUNT(x)) +#define DRM_WAITCOUNT(dev,idx) DRM_BUFCOUNT(&dev->queuelist[idx]->waitlist) + +#define DRM_IF_VERSION(maj, min) (maj << 16 | min) +/** + * Get the private SAREA mapping. + * + * \param _dev DRM device. + * \param _ctx context number. + * \param _map output mapping. + */ +#define DRM_GET_PRIV_SAREA(_dev, _ctx, _map) do { \ + (_map) = (_dev)->context_sareas[_ctx]; \ +} while(0) + +/** + * Test that the hardware lock is held by the caller, returning otherwise. + * + * \param dev DRM device. + * \param filp file pointer of the caller. 
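+ *
+ * \note On failure this logs a DRM_ERROR and makes the enclosing function
+ * return -EINVAL, so it can only be used inside functions that return int
+ * (typically ioctl handlers).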
+ */
+#define LOCK_TEST_WITH_RETURN( dev, filp )				\
+do {									\
+	if ( !_DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ) ||		\
+	     dev->lock.filp != filp ) {					\
+		DRM_ERROR( "%s called without lock held, held %d owner %p %p\n",\
+			   __FUNCTION__, _DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ),\
+			   dev->lock.filp, filp );			\
+		return -EINVAL;						\
+	}								\
+} while (0)
+
+/**
+ * Copy an IOCTL return string to user space
+ */
+#define DRM_COPY( name, value )						\
+	len = strlen( value );						\
+	if ( len > name##_len ) len = name##_len;			\
+	name##_len = strlen( value );					\
+	if ( len && name ) {						\
+		if ( copy_to_user( name, value, len ) )			\
+			return -EFAULT;					\
+	}
+
+/**
+ * Ioctl function type.
+ *
+ * \param inode device inode.
+ * \param filp file pointer.
+ * \param cmd command.
+ * \param arg argument.
+ */
+typedef int drm_ioctl_t(struct inode *inode, struct file *filp,
+			unsigned int cmd, unsigned long arg);
+
+typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
+			       unsigned long arg);
+
+#define DRM_AUTH	0x1
+#define DRM_MASTER	0x2
+#define DRM_ROOT_ONLY	0x4
+
+typedef struct drm_ioctl_desc {
+	drm_ioctl_t *func;
+	int flags;
+} drm_ioctl_desc_t;
+
+typedef struct drm_devstate {
+	pid_t owner;			/**< X server pid holding x_lock */
+} drm_devstate_t;
+
+typedef struct drm_magic_entry {
+	drm_magic_t magic;
+	struct drm_file *priv;
+	struct drm_magic_entry *next;
+} drm_magic_entry_t;
+
+typedef struct drm_magic_head {
+	struct drm_magic_entry *head;
+	struct drm_magic_entry *tail;
+} drm_magic_head_t;
+
+typedef struct drm_vma_entry {
+	struct vm_area_struct *vma;
+	struct drm_vma_entry *next;
+	pid_t pid;
+} drm_vma_entry_t;
+
+/**
+ * DMA buffer.
+ */
+typedef struct drm_buf {
+	int idx;			/**< Index into master buflist */
+	int total;			/**< Buffer size */
+	int order;			/**< log-base-2(total) */
+	int used;			/**< Amount of buffer in use (for DMA) */
+	unsigned long offset;		/**< Byte offset (used internally) */
+	void *address;			/**< Address of buffer */
+	unsigned long bus_address;	/**< Bus address of buffer */
+	struct drm_buf *next;		/**< Kernel-only: used for free list */
+	__volatile__ int waiting;	/**< On kernel DMA queue */
+	__volatile__ int pending;	/**< On hardware DMA queue */
+	wait_queue_head_t dma_wait;	/**< Processes waiting */
+	struct file *filp;		/**< Pointer to holding file descr */
+	int context;			/**< Kernel queue for this buffer */
+	int while_locked;		/**< Dispatch this buffer while locked */
+	enum {
+		DRM_LIST_NONE = 0,
+		DRM_LIST_FREE = 1,
+		DRM_LIST_WAIT = 2,
+		DRM_LIST_PEND = 3,
+		DRM_LIST_PRIO = 4,
+		DRM_LIST_RECLAIM = 5
+	} list;				/**< Which list we're on */
+
+	int dev_priv_size;		/**< Size of buffer private storage */
+	void *dev_private;		/**< Per-buffer private storage */
+} drm_buf_t;
+
+/** bufs is one longer than it has to be */
+typedef struct drm_waitlist {
+	int count;			/**< Number of possible buffers */
+	drm_buf_t **bufs;		/**< List of pointers to buffers */
+	drm_buf_t **rp;			/**< Read pointer */
+	drm_buf_t **wp;			/**< Write pointer */
+	drm_buf_t **end;		/**< End pointer */
+	spinlock_t read_lock;
+	spinlock_t write_lock;
+} drm_waitlist_t;
+
+typedef struct drm_freelist {
+	int initialized;		/**< Freelist in use */
+	atomic_t count;			/**< Number of free buffers */
+	drm_buf_t *next;		/**< End pointer */
+
+	wait_queue_head_t waiting;	/**< Processes waiting on free bufs */
+	int low_mark;			/**< Low water mark */
+	int high_mark;			/**< High water mark */
+	atomic_t wfh;			/**< If waiting for high mark */
+	spinlock_t lock;
+} drm_freelist_t;
+
+/**
+ * Buffer entry.  There is one of these for each buffer size order.
+ */
+typedef struct drm_buf_entry {
+	int buf_size;			/**< size */
+	int buf_count;			/**< number of buffers */
+	drm_buf_t *buflist;		/**< buffer list */
+	int seg_count;
+	int page_order;
+	unsigned long *seglist;
+
+	drm_freelist_t freelist;
+} drm_buf_entry_t;
+
+/** File private data */
+typedef struct drm_file {
+	int authenticated;
+	int master;
+	int minor;
+	pid_t pid;
+	uid_t uid;
+	drm_magic_t magic;
+	unsigned long ioctl_count;
+	struct drm_file *next;
+	struct drm_file *prev;
+	struct drm_head *head;
+	int remove_auth_on_close;
+	unsigned long lock_count;
+	void *driver_priv;
+} drm_file_t;
+
+/** Wait queue */
+typedef struct drm_queue {
+	atomic_t use_count;		/**< Outstanding uses (+1) */
+	atomic_t finalization;		/**< Finalization in progress */
+	atomic_t block_count;		/**< Count of processes waiting */
+	atomic_t block_read;		/**< Queue blocked for reads */
+	wait_queue_head_t read_queue;	/**< Processes waiting on block_read */
+	atomic_t block_write;		/**< Queue blocked for writes */
+	wait_queue_head_t write_queue;	/**< Processes waiting on block_write */
+#if 1
+	atomic_t total_queued;		/**< Total queued statistic */
+	atomic_t total_flushed;		/**< Total flushes statistic */
+	atomic_t total_locks;		/**< Total locks statistics */
+#endif
+	drm_ctx_flags_t flags;		/**< Context preserving and 2D-only */
+	drm_waitlist_t waitlist;	/**< Pending buffers */
+	wait_queue_head_t flush_queue;	/**< Processes waiting until flush */
+} drm_queue_t;
+
+/**
+ * Lock data.
+ */
+typedef struct drm_lock_data {
+	drm_hw_lock_t *hw_lock;		/**< Hardware lock */
+	struct file *filp;		/**< File descr of lock holder (0=kernel) */
+	wait_queue_head_t lock_queue;	/**< Queue of blocked processes */
+	unsigned long lock_time;	/**< Time of last lock in jiffies */
+} drm_lock_data_t;
+
+/**
+ * DMA data.
+ */
+typedef struct drm_device_dma {
+
+	drm_buf_entry_t bufs[DRM_MAX_ORDER + 1];	/**< buffers, grouped by their size order */
+	int buf_count;			/**< total number of buffers */
+	drm_buf_t **buflist;		/**< Vector of pointers into drm_device_dma::bufs */
+	int seg_count;
+	int page_count;			/**< number of pages */
+	unsigned long *pagelist;	/**< page list */
+	unsigned long byte_count;
+	enum {
+		_DRM_DMA_USE_AGP = 0x01,
+		_DRM_DMA_USE_SG = 0x02,
+		_DRM_DMA_USE_FB = 0x04
+	} flags;
+
+} drm_device_dma_t;
+
+/**
+ * AGP memory entry.  Stored as a doubly linked list.
+ */
+typedef struct drm_agp_mem {
+	unsigned long handle;		/**< handle */
+	DRM_AGP_MEM *memory;
+	unsigned long bound;		/**< address */
+	int pages;
+	struct drm_agp_mem *prev;	/**< previous entry */
+	struct drm_agp_mem *next;	/**< next entry */
+} drm_agp_mem_t;
+
+/**
+ * AGP data.
+ *
+ * \sa drm_agp_init() and drm_device::agp.
+ */
+typedef struct drm_agp_head {
+	DRM_AGP_KERN agp_info;		/**< AGP device information */
+	drm_agp_mem_t *memory;		/**< memory entries */
+	unsigned long mode;		/**< AGP mode */
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,11)
+	struct agp_bridge_data *bridge;
+#endif
+	int enabled;			/**< whether the AGP bus has been enabled */
+	int acquired;			/**< whether the AGP device has been acquired */
+	unsigned long base;
+	int agp_mtrr;
+	int cant_use_aperture;
+	unsigned long page_mask;
+} drm_agp_head_t;
+
+/**
+ * Scatter-gather memory.
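+ * Built by drm_sg_alloc() and released by drm_sg_cleanup(), both declared
+ * further below in this header.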
+ */
+typedef struct drm_sg_mem {
+	unsigned long handle;
+	void *virtual;
+	int pages;
+	struct page **pagelist;
+	dma_addr_t *busaddr;
+} drm_sg_mem_t;
+
+typedef struct drm_sigdata {
+	int context;
+	drm_hw_lock_t *lock;
+} drm_sigdata_t;
+
+typedef struct drm_dma_handle {
+	dma_addr_t busaddr;
+	void *vaddr;
+	size_t size;
+} drm_dma_handle_t;
+
+/**
+ * Mappings list
+ */
+typedef struct drm_map_list {
+	struct list_head head;		/**< list head */
+	drm_map_t *map;			/**< mapping */
+	unsigned int user_token;
+} drm_map_list_t;
+
+typedef drm_map_t drm_local_map_t;
+
+/**
+ * Context handle list
+ */
+typedef struct drm_ctx_list {
+	struct list_head head;		/**< list head */
+	drm_context_t handle;		/**< context handle */
+	drm_file_t *tag;		/**< associated fd private data */
+} drm_ctx_list_t;
+
+typedef struct drm_vbl_sig {
+	struct list_head head;
+	unsigned int sequence;
+	struct siginfo info;
+	struct task_struct *task;
+} drm_vbl_sig_t;
+
+/* location of GART table */
+#define DRM_ATI_GART_MAIN 1
+#define DRM_ATI_GART_FB   2
+
+typedef struct ati_pcigart_info {
+	int gart_table_location;
+	int is_pcie;
+	void *addr;
+	dma_addr_t bus_addr;
+	drm_local_map_t mapping;
+} drm_ati_pcigart_info;
+
+/**
+ * DRM driver structure. This structure represents the common code for
+ * a family of cards. There will be one drm_device for each card present
+ * in this family.
+ */
+struct drm_device;
+struct drm_driver {
+	int (*load) (struct drm_device *, unsigned long flags);
+	int (*firstopen) (struct drm_device *);
+	int (*open) (struct drm_device *, drm_file_t *);
+	void (*preclose) (struct drm_device *, struct file * filp);
+	void (*postclose) (struct drm_device *, drm_file_t *);
+	void (*lastclose) (struct drm_device *);
+	int (*unload) (struct drm_device *);
+	int (*dma_ioctl) (DRM_IOCTL_ARGS);
+	void (*dma_ready) (struct drm_device *);
+	int (*dma_quiescent) (struct drm_device *);
+	int (*context_ctor) (struct drm_device * dev, int context);
+	int (*context_dtor) (struct drm_device * dev, int context);
+	int (*kernel_context_switch) (struct drm_device * dev, int old,
+				      int new);
+	int (*kernel_context_switch_unlock) (struct drm_device * dev);
+	int (*vblank_wait) (struct drm_device * dev, unsigned int *sequence);
+	int (*dri_library_name) (struct drm_device * dev, char * buf);
+
+	/**
+	 * Called by \c drm_device_is_agp.  Typically used to determine if a
+	 * card is really attached to AGP or not.
+	 *
+	 * \param dev  DRM device handle
+	 *
+	 * \returns
+	 * One of three values is returned depending on whether or not the
+	 * card is absolutely \b not AGP (return of 0), absolutely \b is AGP
+	 * (return of 1), or may or may not be AGP (return of 2).
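+	 *
+	 * A driver whose hardware can never live on AGP could, for example,
+	 * supply (hypothetical foo driver, illustrative only):
+	 * \code
+	 * static int foo_device_is_agp(struct drm_device * dev)
+	 * {
+	 *	return 0;	/* absolutely not AGP */
+	 * }
+	 * \endcode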
+	 */
+	int (*device_is_agp) (struct drm_device * dev);
+
+/* these have to be filled in */
+	irqreturn_t(*irq_handler) (DRM_IRQ_ARGS);
+	void (*irq_preinstall) (struct drm_device * dev);
+	void (*irq_postinstall) (struct drm_device * dev);
+	void (*irq_uninstall) (struct drm_device * dev);
+	void (*reclaim_buffers) (struct drm_device *dev, struct file * filp);
+	void (*reclaim_buffers_locked) (struct drm_device *dev,
+					struct file * filp);
+	unsigned long (*get_map_ofs) (drm_map_t * map);
+	unsigned long (*get_reg_ofs) (struct drm_device * dev);
+	void (*set_version) (struct drm_device * dev, drm_set_version_t * sv);
+
+	int major;
+	int minor;
+	int patchlevel;
+	char *name;
+	char *desc;
+	char *date;
+
+/* variables */
+	u32 driver_features;
+	int dev_priv_size;
+	drm_ioctl_desc_t *ioctls;
+	int num_ioctls;
+	struct file_operations fops;
+	struct pci_driver pci_driver;
+};
+
+/**
+ * DRM head structure. This structure represents a video head on a card
+ * that may contain multiple heads. Embed one of these per head in the
+ * private drm_device structure.
+ */
+typedef struct drm_head {
+	int minor;			/**< Minor device number */
+	struct drm_device *dev;
+	struct proc_dir_entry *dev_root;  /**< proc directory entry */
+	dev_t device;			/**< Device number for mknod */
+	struct class_device *dev_class;
+} drm_head_t;
+
+/**
+ * DRM device structure. This structure represents a complete card that
+ * may contain multiple heads.
+ */
+typedef struct drm_device {
+	char *unique;			/**< Unique identifier: e.g., busid */
+	int unique_len;			/**< Length of unique field */
+	char *devname;			/**< For /proc/interrupts */
+	int if_version;			/**< Highest interface version set */
+
+	int blocked;			/**< Blocked due to VC switch? */
+
+	/** \name Locks */
+	/*@{ */
+	spinlock_t count_lock;		/**< For inuse, drm_device::open_count, drm_device::buf_use */
+	struct semaphore struct_sem;	/**< For others */
+	/*@} */
+
+	/** \name Usage Counters */
+	/*@{ */
+	int open_count;			/**< Outstanding files open */
+	atomic_t ioctl_count;		/**< Outstanding IOCTLs pending */
+	atomic_t vma_count;		/**< Outstanding vma areas open */
+	int buf_use;			/**< Buffers in use -- cannot alloc */
+	atomic_t buf_alloc;		/**< Buffer allocation in progress */
+	/*@} */
+
+	/** \name Performance counters */
+	/*@{ */
+	unsigned long counters;
+	drm_stat_type_t types[15];
+	atomic_t counts[15];
+	/*@} */
+
+	/** \name Authentication */
+	/*@{ */
+	drm_file_t *file_first;		/**< file list head */
+	drm_file_t *file_last;		/**< file list tail */
+	drm_magic_head_t magiclist[DRM_HASH_SIZE];	/**< magic hash table */
+	/*@} */
+
+	/** \name Memory management */
+	/*@{ */
+	drm_map_list_t *maplist;	/**< Linked list of regions */
+	int map_count;			/**< Number of mappable regions */
+
+	/** \name Context handle management */
+	/*@{ */
+	drm_ctx_list_t *ctxlist;	/**< Linked list of context handles */
+	int ctx_count;			/**< Number of context handles */
+	struct semaphore ctxlist_sem;	/**< For ctxlist */
+
+	drm_map_t **context_sareas;	/**< per-context SAREA's */
+	int max_context;
+
+	drm_vma_entry_t *vmalist;	/**< List of vmas (for debugging) */
+	drm_lock_data_t lock;		/**< Information on hardware lock */
+	/*@} */
+
+	/** \name DMA queues (contexts) */
+	/*@{ */
+	int queue_count;		/**< Number of active DMA queues */
+	int queue_reserved;		/**< Number of reserved DMA queues */
+	int queue_slots;		/**< Actual length of queuelist */
+	drm_queue_t **queuelist;	/**< Vector of pointers to DMA queues */
+	drm_device_dma_t *dma;		/**< Optional pointer for DMA support */
+
/*@} */ + + /** \name Context support */ + /*@{ */ + int irq; /**< Interrupt used by board */ + int irq_enabled; /**< True if irq handler is enabled */ + __volatile__ long context_flag; /**< Context swapping flag */ + __volatile__ long interrupt_flag; /**< Interruption handler flag */ + __volatile__ long dma_flag; /**< DMA dispatch flag */ + struct timer_list timer; /**< Timer for delaying ctx switch */ + wait_queue_head_t context_wait; /**< Processes waiting on ctx switch */ + int last_checked; /**< Last context checked for DMA */ + int last_context; /**< Last current context */ + unsigned long last_switch; /**< jiffies at last context switch */ + /*@} */ + +#if !HAS_WORKQUEUE + struct tq_struct tq; +#else + struct work_struct work; +#endif + /** \name VBLANK IRQ support */ + /*@{ */ + + wait_queue_head_t vbl_queue; /**< VBLANK wait queue */ + atomic_t vbl_received; + spinlock_t vbl_lock; + drm_vbl_sig_t vbl_sigs; /**< signal list to send on VBLANK */ + unsigned int vbl_pending; + + /*@} */ + cycles_t ctx_start; + cycles_t lck_start; + + struct fasync_struct *buf_async;/**< Processes waiting for SIGIO */ + wait_queue_head_t buf_readers; /**< Processes waiting to read */ + wait_queue_head_t buf_writers; /**< Processes waiting to ctx switch */ + + drm_agp_head_t *agp; /**< AGP data */ + + struct pci_dev *pdev; /**< PCI device structure */ + int pci_domain; /**< PCI bus domain number */ + int pci_bus; /**< PCI bus number */ + int pci_slot; /**< PCI slot number */ + int pci_func; /**< PCI function number */ +#ifdef __alpha__ +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,3) + struct pci_controler *hose; +#else + struct pci_controller *hose; +#endif +#endif + drm_sg_mem_t *sg; /**< Scatter gather memory */ + unsigned long *ctx_bitmap; /**< context bitmap */ + void *dev_private; /**< device private data */ + drm_sigdata_t sigdata; /**< For block_all_signals */ + sigset_t sigmask; + + struct drm_driver *driver; + drm_local_map_t *agp_buffer_map; + unsigned int agp_buffer_token; + drm_head_t primary; /**< primary screen head */ +} drm_device_t; + +static __inline__ int drm_core_check_feature(struct drm_device *dev, + int feature) +{ + return ((dev->driver->driver_features & feature) ? 
1 : 0); +} + +#if __OS_HAS_AGP +static inline int drm_core_has_AGP(struct drm_device *dev) +{ + return drm_core_check_feature(dev, DRIVER_USE_AGP); +} +#else +#define drm_core_has_AGP(dev) (0) +#endif + +#if __OS_HAS_MTRR +static inline int drm_core_has_MTRR(struct drm_device *dev) +{ + return drm_core_check_feature(dev, DRIVER_USE_MTRR); +} +#else +#define drm_core_has_MTRR(dev) (0) +#endif + +/******************************************************************/ +/** \name Internal function definitions */ +/*@{*/ + + /* Driver support (drm_drv.h) */ +extern int drm_fb_loaded; +extern int __devinit drm_init(struct drm_driver *driver, + struct pci_device_id *pciidlist); +extern void __exit drm_exit(struct drm_driver *driver); +extern void __exit drm_cleanup_pci(struct pci_dev *pdev); +extern int drm_ioctl(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg); +extern long drm_compat_ioctl(struct file *filp, + unsigned int cmd, unsigned long arg); + +extern int drm_lastclose(drm_device_t * dev); + + /* Device support (drm_fops.h) */ +extern int drm_open(struct inode *inode, struct file *filp); +extern int drm_stub_open(struct inode *inode, struct file *filp); +extern int drm_fasync(int fd, struct file *filp, int on); +extern int drm_release(struct inode *inode, struct file *filp); +unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait); + + /* Mapping support (drm_vm.h) */ +extern int drm_mmap(struct file *filp, struct vm_area_struct *vma); +extern unsigned long drm_core_get_map_ofs(drm_map_t * map); +extern unsigned long drm_core_get_reg_ofs(struct drm_device *dev); + + /* Memory management support (drm_memory.h) */ +#include "drm_memory.h" +extern void drm_mem_init(void); +extern int drm_mem_info(char *buf, char **start, off_t offset, + int request, int *eof, void *data); +extern void *drm_calloc(size_t nmemb, size_t size, int area); +extern void *drm_realloc(void *oldpt, size_t oldsize, size_t size, int area); +extern unsigned long drm_alloc_pages(int order, int area); +extern void drm_free_pages(unsigned long address, int order, int area); +extern DRM_AGP_MEM *drm_alloc_agp(drm_device_t *dev, int pages, u32 type); +extern int drm_free_agp(DRM_AGP_MEM * handle, int pages); +extern int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start); +extern int drm_unbind_agp(DRM_AGP_MEM * handle); + + /* Misc. 
IOCTL support (drm_ioctl.h) */ +extern int drm_irq_by_busid(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg); +extern int drm_getunique(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg); +extern int drm_setunique(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg); +extern int drm_getmap(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg); +extern int drm_getclient(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg); +extern int drm_getstats(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg); +extern int drm_setversion(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg); +extern int drm_noop(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg); + + /* Context IOCTL support (drm_context.h) */ +extern int drm_resctx(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg); +extern int drm_addctx(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg); +extern int drm_modctx(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg); +extern int drm_getctx(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg); +extern int drm_switchctx(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg); +extern int drm_newctx(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg); +extern int drm_rmctx(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg); + +extern int drm_context_switch_complete(drm_device_t * dev, int new); + +extern int drm_ctxbitmap_init(drm_device_t * dev); +extern void drm_ctxbitmap_cleanup(drm_device_t * dev); +extern void drm_ctxbitmap_free(drm_device_t * dev, int ctx_handle); + +extern int drm_setsareactx(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg); +extern int drm_getsareactx(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg); + + /* Drawable IOCTL support (drm_drawable.h) */ +extern int drm_adddraw(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg); +extern int drm_rmdraw(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg); + + /* Authentication IOCTL support (drm_auth.h) */ +extern int drm_getmagic(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg); +extern int drm_authmagic(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg); + + /* Locking IOCTL support (drm_lock.h) */ +extern int drm_lock(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg); +extern int drm_unlock(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg); +extern int drm_lock_take(__volatile__ unsigned int *lock, unsigned int context); +extern int drm_lock_free(drm_device_t * dev, + __volatile__ unsigned int *lock, unsigned int context); + + /* Buffer management support (drm_bufs.h) */ +extern int drm_addbufs_agp(drm_device_t * dev, drm_buf_desc_t * request); +extern int drm_addbufs_pci(drm_device_t * dev, drm_buf_desc_t * request); +extern int drm_addbufs_fb (drm_device_t * dev, drm_buf_desc_t * request); +extern int drm_addmap(drm_device_t * dev, unsigned int offset, + unsigned int size, drm_map_type_t type, + drm_map_flags_t flags, drm_local_map_t ** map_ptr); +extern int drm_addmap_ioctl(struct inode *inode, 
struct file *filp, + unsigned int cmd, unsigned long arg); +extern int drm_rmmap(drm_device_t *dev, drm_local_map_t *map); +extern int drm_rmmap_locked(drm_device_t *dev, drm_local_map_t *map); +extern int drm_rmmap_ioctl(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg); +extern int drm_addbufs(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg); +extern int drm_infobufs(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg); +extern int drm_markbufs(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg); +extern int drm_freebufs(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg); +extern int drm_mapbufs(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg); +extern int drm_order(unsigned long size); +extern unsigned long drm_get_resource_start(drm_device_t *dev, + unsigned int resource); +extern unsigned long drm_get_resource_len(drm_device_t *dev, + unsigned int resource); + + /* DMA support (drm_dma.h) */ +extern int drm_dma_setup(drm_device_t * dev); +extern void drm_dma_takedown(drm_device_t * dev); +extern void drm_free_buffer(drm_device_t * dev, drm_buf_t * buf); +extern void drm_core_reclaim_buffers(drm_device_t *dev, struct file *filp); + + /* IRQ support (drm_irq.h) */ +extern int drm_control(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg); +extern irqreturn_t drm_irq_handler(DRM_IRQ_ARGS); +extern int drm_irq_uninstall(drm_device_t *dev); +extern void drm_driver_irq_preinstall(drm_device_t * dev); +extern void drm_driver_irq_postinstall(drm_device_t * dev); +extern void drm_driver_irq_uninstall(drm_device_t * dev); + +extern int drm_wait_vblank(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg); +extern int drm_vblank_wait(drm_device_t * dev, unsigned int *vbl_seq); +extern void drm_vbl_send_signals(drm_device_t * dev); + + /* AGP/GART support (drm_agpsupport.h) */ +extern drm_agp_head_t *drm_agp_init(drm_device_t *dev); +extern int drm_agp_acquire(drm_device_t * dev); +extern int drm_agp_acquire_ioctl(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg); +extern int drm_agp_release(drm_device_t *dev); +extern int drm_agp_release_ioctl(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg); +extern int drm_agp_enable(drm_device_t *dev, drm_agp_mode_t mode); +extern int drm_agp_enable_ioctl(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg); +extern int drm_agp_info(drm_device_t * dev, drm_agp_info_t *info); +extern int drm_agp_info_ioctl(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg); +extern int drm_agp_alloc(drm_device_t *dev, drm_agp_buffer_t *request); +extern int drm_agp_alloc_ioctl(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg); +extern int drm_agp_free(drm_device_t *dev, drm_agp_buffer_t *request); +extern int drm_agp_free_ioctl(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg); +extern int drm_agp_unbind(drm_device_t *dev, drm_agp_binding_t *request); +extern int drm_agp_unbind_ioctl(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg); +extern int drm_agp_bind(drm_device_t *dev, drm_agp_binding_t *request); +extern int drm_agp_bind_ioctl(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg); +#if LINUX_VERSION_CODE <= 
KERNEL_VERSION(2,6,11) +extern DRM_AGP_MEM *drm_agp_allocate_memory(size_t pages, u32 type); +#else +extern DRM_AGP_MEM *drm_agp_allocate_memory(struct agp_bridge_data *bridge, size_t pages, u32 type); +#endif +extern int drm_agp_free_memory(DRM_AGP_MEM * handle); +extern int drm_agp_bind_memory(DRM_AGP_MEM * handle, off_t start); +extern int drm_agp_unbind_memory(DRM_AGP_MEM * handle); + + /* Stub support (drm_stub.h) */ +extern int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent, + struct drm_driver *driver); +extern int drm_put_dev(drm_device_t * dev); +extern int drm_put_head(drm_head_t * head); +extern unsigned int drm_debug; /* 1 to enable debug output */ +extern unsigned int cards_limit; +extern drm_head_t **drm_heads; +extern struct drm_sysfs_class *drm_class; +extern struct proc_dir_entry *drm_proc_root; + + /* Proc support (drm_proc.h) */ +extern int drm_proc_init(drm_device_t * dev, + int minor, + struct proc_dir_entry *root, + struct proc_dir_entry **dev_root); +extern int drm_proc_cleanup(int minor, + struct proc_dir_entry *root, + struct proc_dir_entry *dev_root); + + /* Scatter Gather Support (drm_scatter.h) */ +extern void drm_sg_cleanup(drm_sg_mem_t * entry); +extern int drm_sg_alloc(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg); +extern int drm_sg_free(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg); + + /* ATI PCIGART support (ati_pcigart.h) */ +extern int drm_ati_pcigart_init(drm_device_t * dev, drm_ati_pcigart_info *gart_info); +extern int drm_ati_pcigart_cleanup(drm_device_t * dev, drm_ati_pcigart_info *gart_info); + +extern drm_dma_handle_t *drm_pci_alloc(drm_device_t * dev, size_t size, + size_t align, dma_addr_t maxaddr); +extern void __drm_pci_free(drm_device_t * dev, drm_dma_handle_t *dmah); +extern void drm_pci_free(drm_device_t * dev, drm_dma_handle_t *dmah); + + /* sysfs support (drm_sysfs.c) */ +struct drm_sysfs_class; +extern struct drm_sysfs_class *drm_sysfs_create(struct module *owner, + char *name); +extern void drm_sysfs_destroy(struct drm_sysfs_class *cs); +extern struct class_device *drm_sysfs_device_add(struct drm_sysfs_class *cs, + drm_head_t * head); +extern void drm_sysfs_device_remove(struct class_device *class_dev); + +/* Inline replacements for DRM_IOREMAP macros */ +static __inline__ void drm_core_ioremap(struct drm_map *map, + struct drm_device *dev) +{ + map->handle = drm_ioremap(map->offset, map->size, dev); +} + +static __inline__ void drm_core_ioremap_nocache(struct drm_map *map, + struct drm_device *dev) +{ + map->handle = drm_ioremap_nocache(map->offset, map->size, dev); +} + +static __inline__ void drm_core_ioremapfree(struct drm_map *map, + struct drm_device *dev) +{ + if (map->handle && map->size) + drm_ioremapfree(map->handle, map->size, dev); +} + +static __inline__ struct drm_map *drm_core_findmap(struct drm_device *dev, + unsigned int token) +{ + drm_map_list_t *_entry; + list_for_each_entry(_entry, &dev->maplist->head, head) + if (_entry->user_token == token) + return _entry->map; + return NULL; +} + +static __inline__ int drm_device_is_agp(drm_device_t *dev) +{ + if ( dev->driver->device_is_agp != NULL ) { + int err = (*dev->driver->device_is_agp)( dev ); + + if (err != 2) { + return err; + } + } + + return pci_find_capability(dev->pdev, PCI_CAP_ID_AGP); +} + +static __inline__ int drm_device_is_pcie(drm_device_t *dev) +{ + return pci_find_capability(dev->pdev, PCI_CAP_ID_EXP); +} + +static __inline__ void drm_core_dropmap(struct drm_map *map) +{ 
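+	/* Intentionally a no-op on Linux: the hook exists so that shared
+	 * code can balance each drm_core_findmap() with a drop on
+	 * platforms that reference-count maps. */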
+} + +#ifndef DEBUG_MEMORY +/** Wrapper around kmalloc() */ +static __inline__ void *drm_alloc(size_t size, int area) +{ + return kmalloc(size, GFP_KERNEL); +} + +/** Wrapper around kfree() */ +static __inline__ void drm_free(void *pt, size_t size, int area) +{ + kfree(pt); +} +#else +extern void *drm_alloc(size_t size, int area); +extern void drm_free(void *pt, size_t size, int area); +#endif + +/*@}*/ + +#endif /* __KERNEL__ */ +#endif diff --git a/nx-X11/extras/drm/linux-core/drm_agpsupport.c b/nx-X11/extras/drm/linux-core/drm_agpsupport.c new file mode 100644 index 000000000..572d7a0d6 --- /dev/null +++ b/nx-X11/extras/drm/linux-core/drm_agpsupport.c @@ -0,0 +1,554 @@ +/** + * \file drm_agpsupport.c + * DRM support for AGP/GART backend + * + * \author Rickard E. (Rik) Faith <faith@valinux.com> + * \author Gareth Hughes <gareth@valinux.com> + */ + +/* + * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. + * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#include "drmP.h" +#include <linux/module.h> + +#if __OS_HAS_AGP + +/** + * Get AGP information. + * + * \param inode device inode. + * \param filp file pointer. + * \param cmd command. + * \param arg pointer to a (output) drm_agp_info structure. + * \return zero on success or a negative number on failure. + * + * Verifies the AGP device has been initialized and acquired and fills in the + * drm_agp_info structure with the information in drm_agp_head::agp_info. 
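+ *
+ * Illustrative kernel-side use (hypothetical, not from the original
+ * sources):
+ * \code
+ * drm_agp_info_t info;
+ * if (drm_agp_info(dev, &info) == 0)
+ *	DRM_INFO("AGP v%d.%d, aperture %lu MB\n",
+ *		 info.agp_version_major, info.agp_version_minor,
+ *		 info.aperture_size / (1024 * 1024));
+ * \endcode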
+ */ +int drm_agp_info(drm_device_t * dev, drm_agp_info_t *info) +{ + DRM_AGP_KERN *kern; + + if (!dev->agp || !dev->agp->acquired) + return -EINVAL; + + kern = &dev->agp->agp_info; + info->agp_version_major = kern->version.major; + info->agp_version_minor = kern->version.minor; + info->mode = kern->mode; + info->aperture_base = kern->aper_base; + info->aperture_size = kern->aper_size * 1024 * 1024; + info->memory_allowed = kern->max_memory << PAGE_SHIFT; + info->memory_used = kern->current_memory << PAGE_SHIFT; + info->id_vendor = kern->device->vendor; + info->id_device = kern->device->device; + + return 0; +} +EXPORT_SYMBOL(drm_agp_info); + +int drm_agp_info_ioctl(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->head->dev; + drm_agp_info_t info; + int err; + + err = drm_agp_info(dev, &info); + if (err) + return err; + + if (copy_to_user((drm_agp_info_t __user *) arg, &info, sizeof(info))) + return -EFAULT; + return 0; +} + +/** + * Acquire the AGP device + * + * \param dev DRM device that is to acquire AGP. + * \return zero on success or a negative number on failure. + * + * Verifies the AGP device hasn't been acquired before and calls + * \c agp_backend_acquire. + */ +int drm_agp_acquire(drm_device_t * dev) +{ +#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,11) + int retcode; +#endif + + if (!dev->agp) + return -ENODEV; + if (dev->agp->acquired) + return -EBUSY; +#ifndef VMAP_4_ARGS + if (dev->agp->cant_use_aperture) + return -EINVAL; +#endif +#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,11) + if ((retcode = agp_backend_acquire())) + return retcode; +#else + if (!(dev->agp->bridge = agp_backend_acquire(dev->pdev))) + return -ENODEV; +#endif + + dev->agp->acquired = 1; + return 0; +} +EXPORT_SYMBOL(drm_agp_acquire); + +/** + * Acquire the AGP device (ioctl). + * + * \param inode device inode. + * \param filp file pointer. + * \param cmd command. + * \param arg user argument. + * \return zero on success or a negative number on failure. + * + * Verifies the AGP device hasn't been acquired before and calls + * \c agp_backend_acquire. + */ +int drm_agp_acquire_ioctl(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg) +{ + drm_file_t *priv = filp->private_data; + + return drm_agp_acquire( (drm_device_t *) priv->head->dev ); +} + +/** + * Release the AGP device + * + * \param dev DRM device that is to release AGP. + * \return zero on success or a negative number on failure. + * + * Verifies the AGP device has been acquired and calls \c agp_backend_release. + */ +int drm_agp_release(drm_device_t *dev) +{ + if (!dev->agp || !dev->agp->acquired) + return -EINVAL; +#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,11) + agp_backend_release(); +#else + agp_backend_release(dev->agp->bridge); +#endif + dev->agp->acquired = 0; + return 0; + +} +EXPORT_SYMBOL(drm_agp_release); + +int drm_agp_release_ioctl(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->head->dev; + + return drm_agp_release(dev); +} + +/** + * Enable the AGP bus. + * + * \param dev DRM device that has previously acquired AGP. + * \param mode Requested AGP mode. + * \return zero on success or a negative number on failure. + * + * Verifies the AGP device has been acquired but not enabled, and calls + * \c agp_enable. 
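+ *
+ * Illustrative call sequence from hypothetical driver setup code:
+ * \code
+ * drm_agp_mode_t mode;
+ *
+ * mode.mode = 4;	/* e.g. request 4x data transfers */
+ * if (drm_agp_acquire(dev) == 0 && drm_agp_enable(dev, mode) == 0)
+ *	DRM_DEBUG("AGP enabled, aperture base 0x%lx\n", dev->agp->base);
+ * \endcode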
+ */ +int drm_agp_enable(drm_device_t *dev, drm_agp_mode_t mode) +{ + if (!dev->agp || !dev->agp->acquired) + return -EINVAL; + + dev->agp->mode = mode.mode; +#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,11) + agp_enable(mode.mode); +#else + agp_enable(dev->agp->bridge, mode.mode); +#endif + dev->agp->base = dev->agp->agp_info.aper_base; + dev->agp->enabled = 1; + return 0; +} +EXPORT_SYMBOL(drm_agp_enable); + +int drm_agp_enable_ioctl(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->head->dev; + drm_agp_mode_t mode; + + + if (copy_from_user(&mode, (drm_agp_mode_t __user *) arg, sizeof(mode))) + return -EFAULT; + + return drm_agp_enable(dev, mode); +} + +/** + * Allocate AGP memory. + * + * \param inode device inode. + * \param filp file pointer. + * \param cmd command. + * \param arg pointer to a drm_agp_buffer structure. + * \return zero on success or a negative number on failure. + * + * Verifies the AGP device is present and has been acquired, allocates the + * memory via alloc_agp() and creates a drm_agp_mem entry for it. + */ +int drm_agp_alloc(drm_device_t *dev, drm_agp_buffer_t *request) +{ + drm_agp_mem_t *entry; + DRM_AGP_MEM *memory; + unsigned long pages; + u32 type; + + if (!dev->agp || !dev->agp->acquired) + return -EINVAL; + if (!(entry = drm_alloc(sizeof(*entry), DRM_MEM_AGPLISTS))) + return -ENOMEM; + + memset(entry, 0, sizeof(*entry)); + + pages = (request->size + PAGE_SIZE - 1) / PAGE_SIZE; + type = (u32) request->type; + if (!(memory = drm_alloc_agp(dev, pages, type))) { + drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS); + return -ENOMEM; + } + + entry->handle = (unsigned long)memory->key + 1; + entry->memory = memory; + entry->bound = 0; + entry->pages = pages; + entry->prev = NULL; + entry->next = dev->agp->memory; + if (dev->agp->memory) + dev->agp->memory->prev = entry; + dev->agp->memory = entry; + + request->handle = entry->handle; + request->physical = memory->physical; + + return 0; +} +EXPORT_SYMBOL(drm_agp_alloc); + + +int drm_agp_alloc_ioctl(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->head->dev; + drm_agp_buffer_t request; + drm_agp_buffer_t __user *argp = (void __user *)arg; + int err; + + if (copy_from_user(&request, argp, sizeof(request))) + return -EFAULT; + + err = drm_agp_alloc(dev, &request); + if (err) + return err; + + if (copy_to_user(argp, &request, sizeof(request))) { + drm_agp_mem_t *entry = dev->agp->memory; + + dev->agp->memory = entry->next; + if (dev->agp->memory) /* the list may now be empty */ + dev->agp->memory->prev = NULL; + drm_free_agp(entry->memory, entry->pages); + drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS); + return -EFAULT; + } + + return 0; +} + +/** + * Search for the AGP memory entry associated with a handle. + * + * \param dev DRM device structure. + * \param handle AGP memory handle. + * \return pointer to the drm_agp_mem structure associated with \p handle. + * + * Walks through drm_agp_head::memory until finding a matching handle. + */ +static drm_agp_mem_t *drm_agp_lookup_entry(drm_device_t * dev, + unsigned long handle) +{ + drm_agp_mem_t *entry; + + for (entry = dev->agp->memory; entry; entry = entry->next) { + if (entry->handle == handle) + return entry; + } + return NULL; +} + +/** + * Unbind AGP memory from the GATT (ioctl). + * + * \param inode device inode. + * \param filp file pointer. + * \param cmd command.
+ * \param arg pointer to a drm_agp_binding structure. + * \return zero on success or a negative number on failure. + * + * Verifies the AGP device is present and acquired, looks up the AGP memory + * entry and passes it to the unbind_agp() function. + */ +int drm_agp_unbind(drm_device_t *dev, drm_agp_binding_t *request) +{ + drm_agp_mem_t *entry; + int ret; + + if (!dev->agp || !dev->agp->acquired) + return -EINVAL; + if (!(entry = drm_agp_lookup_entry(dev, request->handle))) + return -EINVAL; + if (!entry->bound) + return -EINVAL; + ret = drm_unbind_agp(entry->memory); + if (ret == 0) + entry->bound = 0; + return ret; +} +EXPORT_SYMBOL(drm_agp_unbind); + + +int drm_agp_unbind_ioctl(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->head->dev; + drm_agp_binding_t request; + + if (copy_from_user + (&request, (drm_agp_binding_t __user *) arg, sizeof(request))) + return -EFAULT; + + return drm_agp_unbind(dev, &request); +} + + +/** + * Bind AGP memory into the GATT (ioctl) + * + * \param inode device inode. + * \param filp file pointer. + * \param cmd command. + * \param arg pointer to a drm_agp_binding structure. + * \return zero on success or a negative number on failure. + * + * Verifies the AGP device is present and has been acquired and that no memory + * is currently bound into the GATT. Looks up the AGP memory entry and passes + * it to the bind_agp() function. + */ +int drm_agp_bind(drm_device_t *dev, drm_agp_binding_t *request) +{ + drm_agp_mem_t *entry; + int retcode; + int page; + + if (!dev->agp || !dev->agp->acquired) + return -EINVAL; + if (!(entry = drm_agp_lookup_entry(dev, request->handle))) + return -EINVAL; + if (entry->bound) + return -EINVAL; + page = (request->offset + PAGE_SIZE - 1) / PAGE_SIZE; + if ((retcode = drm_bind_agp(entry->memory, page))) + return retcode; + entry->bound = dev->agp->base + (page << PAGE_SHIFT); + DRM_DEBUG("base = 0x%lx entry->bound = 0x%lx\n", + dev->agp->base, entry->bound); + return 0; +} +EXPORT_SYMBOL(drm_agp_bind); + + +int drm_agp_bind_ioctl(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->head->dev; + drm_agp_binding_t request; + + if (copy_from_user + (&request, (drm_agp_binding_t __user *) arg, sizeof(request))) + return -EFAULT; + + return drm_agp_bind(dev, &request); +} + + +/** + * Free AGP memory (ioctl). + * + * \param inode device inode. + * \param filp file pointer. + * \param cmd command. + * \param arg pointer to a drm_agp_buffer structure. + * \return zero on success or a negative number on failure. + * + * Verifies the AGP device is present and has been acquired and looks up the + * AGP memory entry. If the memory is currently bound, unbinds it via + * unbind_agp(). Frees it via free_agp() as well as the entry itself, + * and unlinks it from the doubly linked list it's inserted in.
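Taken together, alloc/bind/unbind/free give a block of AGP memory a simple lifecycle. A hedged sketch of driver-internal use (the size, the type value and the function name are illustrative):

#include "drmP.h"

static int my_driver_agp_roundtrip(drm_device_t *dev)
{
    drm_agp_buffer_t req;
    drm_agp_binding_t bind;
    int ret;

    memset(&req, 0, sizeof(req));
    req.size = 4 * PAGE_SIZE;   /* drm_agp_alloc() rounds up to whole pages */
    req.type = 0;               /* plain AGP memory (illustrative) */
    ret = drm_agp_alloc(dev, &req);
    if (ret)
        return ret;

    bind.handle = req.handle;
    bind.offset = 0;            /* page-aligned offset into the aperture */
    ret = drm_agp_bind(dev, &bind);
    if (!ret) {
        /* ... use the memory ... */
        drm_agp_unbind(dev, &bind);
    }

    drm_agp_free(dev, &req);    /* also unbinds if still bound */
    return ret;
}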
+ */ +int drm_agp_free(drm_device_t *dev, drm_agp_buffer_t *request) +{ + drm_agp_mem_t *entry; + + if (!dev->agp || !dev->agp->acquired) + return -EINVAL; + if (!(entry = drm_agp_lookup_entry(dev, request->handle))) + return -EINVAL; + if (entry->bound) + drm_unbind_agp(entry->memory); + + if (entry->prev) + entry->prev->next = entry->next; + else + dev->agp->memory = entry->next; + + if (entry->next) + entry->next->prev = entry->prev; + + drm_free_agp(entry->memory, entry->pages); + drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS); + return 0; +} +EXPORT_SYMBOL(drm_agp_free); + + + +int drm_agp_free_ioctl(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->head->dev; + drm_agp_buffer_t request; + + if (copy_from_user + (&request, (drm_agp_buffer_t __user *) arg, sizeof(request))) + return -EFAULT; + + return drm_agp_free(dev, &request); +} + + +/** + * Initialize the AGP resources. + * + * \return pointer to a drm_agp_head structure. + * + * Gets the drm_agp_t structure which is made available by the agpgart module + * via the inter_module_* functions. Creates and initializes a drm_agp_head + * structure. + */ +drm_agp_head_t *drm_agp_init(drm_device_t *dev) +{ + drm_agp_head_t *head = NULL; + + if (!(head = drm_alloc(sizeof(*head), DRM_MEM_AGPLISTS))) + return NULL; + memset((void *)head, 0, sizeof(*head)); + +#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,11) + agp_copy_info(&head->agp_info); +#else + head->bridge = agp_find_bridge(dev->pdev); + if (!head->bridge) { + if (!(head->bridge = agp_backend_acquire(dev->pdev))) { + drm_free(head, sizeof(*head), DRM_MEM_AGPLISTS); + return NULL; + } + agp_copy_info(head->bridge, &head->agp_info); + agp_backend_release(head->bridge); + } else { + agp_copy_info(head->bridge, &head->agp_info); + } +#endif + if (head->agp_info.chipset == NOT_SUPPORTED) { + drm_free(head, sizeof(*head), DRM_MEM_AGPLISTS); + return NULL; + } + head->memory = NULL; + head->cant_use_aperture = head->agp_info.cant_use_aperture; + head->page_mask = head->agp_info.page_mask; + return head; +} + +/** Calls agp_allocate_memory() */ +#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,11) +DRM_AGP_MEM *drm_agp_allocate_memory(size_t pages, u32 type) +{ + return agp_allocate_memory(pages, type); +} +#else +DRM_AGP_MEM *drm_agp_allocate_memory(struct agp_bridge_data *bridge, size_t pages, u32 type) +{ + return agp_allocate_memory(bridge, pages, type); +} +#endif + +/** Calls agp_free_memory() */ +int drm_agp_free_memory(DRM_AGP_MEM * handle) +{ + if (!handle) + return 0; + agp_free_memory(handle); + return 1; +} + +/** Calls agp_bind_memory() */ +int drm_agp_bind_memory(DRM_AGP_MEM * handle, off_t start) +{ + if (!handle) + return -EINVAL; + return agp_bind_memory(handle, start); +} +EXPORT_SYMBOL(drm_agp_bind_memory); + +/** Calls agp_unbind_memory() */ +int drm_agp_unbind_memory(DRM_AGP_MEM * handle) +{ + if (!handle) + return -EINVAL; + return agp_unbind_memory(handle); +} + +#endif /* __OS_HAS_AGP */ diff --git a/nx-X11/extras/drm/linux-core/drm_auth.c b/nx-X11/extras/drm/linux-core/drm_auth.c new file mode 100644 index 000000000..e32930893 --- /dev/null +++ b/nx-X11/extras/drm/linux-core/drm_auth.c @@ -0,0 +1,231 @@ +/** + * \file drm_auth.c + * IOCTLs for authentication + * + * \author Rickard E. 
(Rik) Faith <faith@valinux.com> + * \author Gareth Hughes <gareth@valinux.com> + */ + +/* + * Created: Tue Feb 2 08:37:54 1999 by faith@valinux.com + * + * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. + * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#include "drmP.h" + +/** + * Generate a hash key from a magic. + * + * \param magic magic. + * \return hash key. + * + * The key is the magic number modulo the hash table size, #DRM_HASH_SIZE, + * which must be a power of 2. + */ +static int drm_hash_magic(drm_magic_t magic) +{ + return magic & (DRM_HASH_SIZE - 1); +} + +/** + * Find the file with the given magic number. + * + * \param dev DRM device. + * \param magic magic number. + * + * Searches drm_device::magiclist, among all files with the same hash key, for + * the one with the matching magic number, while holding the + * drm_device::struct_sem lock. + */ +static drm_file_t *drm_find_file(drm_device_t * dev, drm_magic_t magic) +{ + drm_file_t *retval = NULL; + drm_magic_entry_t *pt; + int hash = drm_hash_magic(magic); + + down(&dev->struct_sem); + for (pt = dev->magiclist[hash].head; pt; pt = pt->next) { + if (pt->magic == magic) { + retval = pt->priv; + break; + } + } + up(&dev->struct_sem); + return retval; +} + +/** + * Adds a magic number. + * + * \param dev DRM device. + * \param priv file private data. + * \param magic magic number. + * + * Creates a drm_magic_entry structure and appends it to the linked list + * associated with the magic number's hash key in drm_device::magiclist, while + * holding the drm_device::struct_sem lock. + */ +static int drm_add_magic(drm_device_t * dev, drm_file_t * priv, drm_magic_t magic) +{ + int hash; + drm_magic_entry_t *entry; + + DRM_DEBUG("%d\n", magic); + + hash = drm_hash_magic(magic); + entry = drm_alloc(sizeof(*entry), DRM_MEM_MAGIC); + if (!entry) + return -ENOMEM; + memset(entry, 0, sizeof(*entry)); + entry->magic = magic; + entry->priv = priv; + entry->next = NULL; + + down(&dev->struct_sem); + if (dev->magiclist[hash].tail) { + dev->magiclist[hash].tail->next = entry; + dev->magiclist[hash].tail = entry; + } else { + dev->magiclist[hash].head = entry; + dev->magiclist[hash].tail = entry; + } + up(&dev->struct_sem); + + return 0; +} + +/** + * Remove a magic number. + * + * \param dev DRM device. + * \param magic magic number.
+ * + * Searches and unlinks the entry in drm_device::magiclist with the magic + * number hash key, while holding the drm_device::struct_sem lock. + */ +static int drm_remove_magic(drm_device_t * dev, drm_magic_t magic) +{ + drm_magic_entry_t *prev = NULL; + drm_magic_entry_t *pt; + int hash; + + DRM_DEBUG("%d\n", magic); + hash = drm_hash_magic(magic); + + down(&dev->struct_sem); + for (pt = dev->magiclist[hash].head; pt; prev = pt, pt = pt->next) { + if (pt->magic == magic) { + if (dev->magiclist[hash].head == pt) { + dev->magiclist[hash].head = pt->next; + } + if (dev->magiclist[hash].tail == pt) { + dev->magiclist[hash].tail = prev; + } + if (prev) { + prev->next = pt->next; + } + up(&dev->struct_sem); + drm_free(pt, sizeof(*pt), DRM_MEM_MAGIC); /* free the unlinked entry */ + return 0; + } + } + up(&dev->struct_sem); + + return -EINVAL; +} + +/** + * Get a unique magic number (ioctl). + * + * \param inode device inode. + * \param filp file pointer. + * \param cmd command. + * \param arg pointer to a resulting drm_auth structure. + * \return zero on success, or a negative number on failure. + * + * If there is a magic number in drm_file::magic then it is used; otherwise a + * unique, non-zero magic number is generated and added, associating it with + * \p filp. + */ +int drm_getmagic(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg) +{ + static drm_magic_t sequence = 0; + static spinlock_t lock = SPIN_LOCK_UNLOCKED; + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->head->dev; + drm_auth_t auth; + + /* Find unique magic */ + if (priv->magic) { + auth.magic = priv->magic; + } else { + do { + spin_lock(&lock); + if (!sequence) + ++sequence; /* reserve 0 */ + auth.magic = sequence++; + spin_unlock(&lock); + } while (drm_find_file(dev, auth.magic)); + priv->magic = auth.magic; + drm_add_magic(dev, priv, auth.magic); + } + + DRM_DEBUG("%u\n", auth.magic); + if (copy_to_user((drm_auth_t __user *) arg, &auth, sizeof(auth))) + return -EFAULT; + return 0; +} + +/** + * Authenticate with a magic. + * + * \param inode device inode. + * \param filp file pointer. + * \param cmd command. + * \param arg pointer to a drm_auth structure. + * \return zero if authentication succeeded, or a negative number otherwise. + * + * Checks if \p filp is associated with the magic number passed in \p arg. + */ +int drm_authmagic(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->head->dev; + drm_auth_t auth; + drm_file_t *file; + + if (copy_from_user(&auth, (drm_auth_t __user *) arg, sizeof(auth))) + return -EFAULT; + DRM_DEBUG("%u\n", auth.magic); + if ((file = drm_find_file(dev, auth.magic))) { + file->authenticated = 1; + drm_remove_magic(dev, auth.magic); + return 0; + } + return -EINVAL; +} diff --git a/nx-X11/extras/drm/linux-core/drm_bufs.c b/nx-X11/extras/drm/linux-core/drm_bufs.c new file mode 100644 index 000000000..5e70e47f2 --- /dev/null +++ b/nx-X11/extras/drm/linux-core/drm_bufs.c @@ -0,0 +1,1676 @@ +/** + * \file drm_bufs.c + * Generic buffer template + * + * \author Rickard E. (Rik) Faith <faith@valinux.com> + * \author Gareth Hughes <gareth@valinux.com> + */ + +/* + * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com + * + * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas. + * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. + * All Rights Reserved.
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#include <linux/vmalloc.h> +#include "drmP.h" + +unsigned long drm_get_resource_start(drm_device_t *dev, unsigned int resource) +{ + return pci_resource_start(dev->pdev, resource); +} +EXPORT_SYMBOL(drm_get_resource_start); + +unsigned long drm_get_resource_len(drm_device_t *dev, unsigned int resource) +{ + return pci_resource_len(dev->pdev, resource); +} +EXPORT_SYMBOL(drm_get_resource_len); + +static drm_map_list_t *drm_find_matching_map(drm_device_t *dev, + drm_local_map_t *map) +{ + struct list_head *list; + + list_for_each(list, &dev->maplist->head) { + drm_map_list_t *entry = list_entry(list, drm_map_list_t, head); + if (entry->map && map->type == entry->map->type && + ((entry->map->offset == map->offset) || + (map->type == _DRM_SHM && map->flags==_DRM_CONTAINS_LOCK))) { + return entry; + } + } + + return NULL; +} + + +/* + * Used to allocate 32-bit handles for mappings. + */ +#define START_RANGE 0x10000000 +#define END_RANGE 0x40000000 + +#ifdef _LP64 +static __inline__ unsigned int HandleID(unsigned long lhandle, drm_device_t *dev) +{ + static unsigned int map32_handle = START_RANGE; + unsigned int hash; + + if (lhandle & 0xffffffff00000000) { + hash = map32_handle; + map32_handle += PAGE_SIZE; + if (map32_handle > END_RANGE) + map32_handle = START_RANGE; + } else + hash = lhandle; + + while (1) { + drm_map_list_t *_entry; + list_for_each_entry(_entry, &dev->maplist->head,head) { + if (_entry->user_token == hash) + break; + } + if (&_entry->head == &dev->maplist->head) + return hash; + + hash += PAGE_SIZE; + map32_handle += PAGE_SIZE; + } +} +#else +# define HandleID(x,dev) (unsigned int)(x) +#endif + +/** + * Ioctl to specify a range of memory that is available for mapping by a non-root process. + * + * \param inode device inode. + * \param filp file pointer. + * \param cmd command. + * \param arg pointer to a drm_map structure. + * \return zero on success or a negative value on error. + * + * Adjusts the memory offset to its absolute value according to the mapping + * type. Adds the map to the map list drm_device::maplist. Adds MTRR's where + * applicable and if supported by the kernel. 
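Drivers reach this code through the drm_addmap() wrapper defined further below. A hedged sketch of the typical register-BAR mapping performed at driver load (the BAR number and the function name are assumptions):

#include "drmP.h"

static int my_driver_map_mmio(drm_device_t *dev)
{
    drm_local_map_t *map;

    /* Describe BAR 0 as a _DRM_REGISTERS map; drm_addmap_core() will
     * ioremap it, and would add an MTRR for a frame buffer map. */
    return drm_addmap(dev,
                      drm_get_resource_start(dev, 0),   /* assumed MMIO BAR */
                      drm_get_resource_len(dev, 0),
                      _DRM_REGISTERS, 0, &map);
}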
+ */ +int drm_addmap_core(drm_device_t * dev, unsigned int offset, + unsigned int size, drm_map_type_t type, + drm_map_flags_t flags, drm_map_list_t ** maplist) +{ + drm_map_t *map; + drm_map_list_t *list; + drm_dma_handle_t *dmah; + + map = drm_alloc(sizeof(*map), DRM_MEM_MAPS); + if (!map) + return -ENOMEM; + + map->offset = offset; + map->size = size; + map->flags = flags; + map->type = type; + + /* Only allow shared memory to be removable since we only keep enough + * book keeping information about shared memory to allow for removal + * when processes fork. + */ + if ((map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM) { + drm_free(map, sizeof(*map), DRM_MEM_MAPS); + return -EINVAL; + } + DRM_DEBUG("offset = 0x%08lx, size = 0x%08lx, type = %d\n", + map->offset, map->size, map->type); + if ((map->offset & (~PAGE_MASK)) || (map->size & (~PAGE_MASK))) { + drm_free(map, sizeof(*map), DRM_MEM_MAPS); + return -EINVAL; + } + map->mtrr = -1; + map->handle = NULL; + + switch (map->type) { + case _DRM_REGISTERS: + case _DRM_FRAME_BUFFER: +#if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__) && !defined(__powerpc64__) && !defined(__x86_64__) + if (map->offset + map->size < map->offset || + map->offset < virt_to_phys(high_memory)) { + drm_free(map, sizeof(*map), DRM_MEM_MAPS); + return -EINVAL; + } +#endif +#ifdef __alpha__ + map->offset += dev->hose->mem_space->start; +#endif + /* Some drivers preinitialize some maps, without the X Server + * needing to be aware of it. Therefore, we just return success + * when the server tries to create a duplicate map. + */ + list = drm_find_matching_map(dev, map); + if (list != NULL) { + if (list->map->size != map->size) { + DRM_DEBUG("Matching maps of type %d with " + "mismatched sizes, (%ld vs %ld)\n", + map->type, map->size, + list->map->size); + list->map->size = map->size; + } + + drm_free(map, sizeof(*map), DRM_MEM_MAPS); + *maplist = list; + return 0; + } + + if (drm_core_has_MTRR(dev)) { + if (map->type == _DRM_FRAME_BUFFER || + (map->flags & _DRM_WRITE_COMBINING)) { + map->mtrr = mtrr_add(map->offset, map->size, + MTRR_TYPE_WRCOMB, 1); + } + } + if (map->type == _DRM_REGISTERS) + map->handle = drm_ioremap(map->offset, map->size, dev); + break; + case _DRM_SHM: + list = drm_find_matching_map(dev, map); + if (list != NULL) { + if(list->map->size != map->size) { + DRM_DEBUG("Matching maps of type %d with " + "mismatched sizes, (%ld vs %ld)\n", + map->type, map->size, list->map->size); + list->map->size = map->size; + } + + drm_free(map, sizeof(*map), DRM_MEM_MAPS); + *maplist = list; + return 0; + } + map->handle = vmalloc_32(map->size); + DRM_DEBUG("%lu %d %p\n", + map->size, drm_order(map->size), map->handle); + if (!map->handle) { + drm_free(map, sizeof(*map), DRM_MEM_MAPS); + return -ENOMEM; + } + map->offset = (unsigned long)map->handle; + if (map->flags & _DRM_CONTAINS_LOCK) { + /* Prevent a 2nd X Server from creating a 2nd lock */ + if (dev->lock.hw_lock != NULL) { + vfree(map->handle); + drm_free(map, sizeof(*map), DRM_MEM_MAPS); + return -EBUSY; + } + dev->sigdata.lock = dev->lock.hw_lock = map->handle; /* Pointer to lock */ + } + break; + case _DRM_AGP: { + drm_agp_mem_t *entry; + int valid = 0; + + if (!drm_core_has_AGP(dev)) { + drm_free(map, sizeof(*map), DRM_MEM_MAPS); + return -EINVAL; + } +#ifdef __alpha__ + map->offset += dev->hose->mem_space->start; +#endif + /* Note: dev->agp->base may actually be 0 when the DRM + * is not in control of AGP space. 
But if user space is + * it should already have added the AGP base itself. + */ + map->offset += dev->agp->base; + map->mtrr = dev->agp->agp_mtrr; /* for getmap */ + + /* This assumes the DRM is in total control of AGP space. + * It's not always the case as AGP can be in the control + * of user space (i.e. i810 driver). So this loop will get + * skipped and we double check that dev->agp->memory is + * actually set as well as being invalid before EPERM'ing + */ + for (entry = dev->agp->memory; entry; entry = entry->next) { + if ((map->offset >= entry->bound) && + (map->offset + map->size <= entry->bound + entry->pages * PAGE_SIZE)) { + valid = 1; + break; + } + } + if (dev->agp->memory && !valid) { + drm_free(map, sizeof(*map), DRM_MEM_MAPS); + return -EPERM; + } + DRM_DEBUG("AGP offset = 0x%08lx, size = 0x%08lx\n", map->offset, map->size); + break; + } + case _DRM_SCATTER_GATHER: + if (!dev->sg) { + drm_free(map, sizeof(*map), DRM_MEM_MAPS); + return -EINVAL; + } + map->offset += (unsigned long)dev->sg->virtual; + break; + case _DRM_CONSISTENT: { + /* dma_addr_t is 64bit on i386 with CONFIG_HIGHMEM64G. + * As we're limiting the address to 2^32-1 (or less), + * casting it down to 32 bits is no problem, but we + * need to point to a 64bit variable first. */ + dmah = drm_pci_alloc(dev, map->size, map->size, 0xffffffffUL); + if (!dmah) { + drm_free(map, sizeof(*map), DRM_MEM_MAPS); + return -ENOMEM; + } + map->handle = dmah->vaddr; + map->offset = (unsigned long)dmah->busaddr; + kfree(dmah); + break; + } + default: + drm_free(map, sizeof(*map), DRM_MEM_MAPS); + return -EINVAL; + } + + list = drm_alloc(sizeof(*list), DRM_MEM_MAPS); + if (!list) { + drm_free(map, sizeof(*map), DRM_MEM_MAPS); + return -EINVAL; + } + memset(list, 0, sizeof(*list)); + list->map = map; + + down(&dev->struct_sem); + list_add(&list->head, &dev->maplist->head); + /* Assign a 32-bit handle */ + /* We do it here so that dev->struct_sem protects the increment */ + list->user_token = HandleID(map->type==_DRM_SHM + ? (unsigned long)map->handle + : map->offset, dev); + up(&dev->struct_sem); + + *maplist = list; + return 0; +} + +int drm_addmap(drm_device_t * dev, unsigned int offset, + unsigned int size, drm_map_type_t type, + drm_map_flags_t flags, drm_local_map_t ** map_ptr) +{ + drm_map_list_t *list; + int rc; + + rc = drm_addmap_core(dev, offset, size, type, flags, &list); + if (!rc) + *map_ptr = list->map; + return rc; +} + +EXPORT_SYMBOL(drm_addmap); + +int drm_addmap_ioctl(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->head->dev; + drm_map_t map; + drm_map_list_t *maplist; + drm_map_t __user *argp = (void __user *)arg; + int err; + + if (!(filp->f_mode & 3)) + return -EACCES; /* Require read/write */ + + if (copy_from_user(& map, argp, sizeof(map))) { + return -EFAULT; + } + + if (!(capable(CAP_SYS_ADMIN) || map.type == _DRM_AGP)) + return -EPERM; + + err = drm_addmap_core( dev, map.offset, map.size, map.type, map.flags, + &maplist); + + if (err) + return err; + + if (copy_to_user(argp, maplist->map, sizeof(drm_map_t))) + return -EFAULT; + if (put_user((void *)maplist->user_token, &argp->handle)) + return -EFAULT; + return 0; +} + +/** + * Remove a map private from list and deallocate resources if the mapping + * isn't in use. + * + * \param inode device inode. + * \param filp file pointer. + * \param cmd command. + * \param arg pointer to a drm_map_t structure. 
+ * \return zero on success or a negative value on error. + * + * Searches for the map on drm_device::maplist, removes it from the list, sees + * if it's being used, and frees any associated resources (such as MTRRs) if + * it's not in use. + * + * \sa drm_addmap + */ +int drm_rmmap_locked(drm_device_t *dev, drm_local_map_t *map) +{ + struct list_head *list; + drm_map_list_t *r_list = NULL; + drm_dma_handle_t dmah; + + /* Find the list entry for the map and remove it */ + list_for_each(list, &dev->maplist->head) { + r_list = list_entry(list, drm_map_list_t, head); + + if (r_list->map == map) { + list_del(list); + drm_free(list, sizeof(*list), DRM_MEM_MAPS); + break; + } + } + + /* List has wrapped around to the head pointer, or it's empty and we + * didn't find anything. + */ + if (list == (&dev->maplist->head)) { + return -EINVAL; + } + + switch (map->type) { + case _DRM_REGISTERS: + drm_ioremapfree(map->handle, map->size, dev); + /* FALLTHROUGH */ + case _DRM_FRAME_BUFFER: + if (drm_core_has_MTRR(dev) && map->mtrr >= 0) { + int retcode; + retcode = mtrr_del(map->mtrr, map->offset, + map->size); + DRM_DEBUG ("mtrr_del=%d\n", retcode); + } + break; + case _DRM_SHM: + vfree(map->handle); + break; + case _DRM_AGP: + case _DRM_SCATTER_GATHER: + break; + case _DRM_CONSISTENT: + dmah.vaddr = map->handle; + dmah.busaddr = map->offset; + dmah.size = map->size; + __drm_pci_free(dev, &dmah); + break; + } + drm_free(map, sizeof(*map), DRM_MEM_MAPS); + + return 0; +} +EXPORT_SYMBOL(drm_rmmap_locked); + +int drm_rmmap(drm_device_t *dev, drm_local_map_t *map) +{ + int ret; + + down(&dev->struct_sem); + ret = drm_rmmap_locked(dev, map); + up(&dev->struct_sem); + + return ret; +} +EXPORT_SYMBOL(drm_rmmap); + +/* The rmmap ioctl appears to be unnecessary. All mappings are torn down on + * the last close of the device, and this is necessary for cleanup when things + * exit uncleanly. Therefore, having userland manually remove mappings seems + * like a pointless exercise since they're going away anyway. + * + * One use case might be after addmap is allowed for normal users for SHM and + * gets used by drivers that the server doesn't need to care about. This seems + * unlikely. + */ +int drm_rmmap_ioctl(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->head->dev; + drm_map_t request; + drm_local_map_t *map = NULL; + struct list_head *list; + int ret; + + if (copy_from_user(&request, (drm_map_t __user *) arg, sizeof(request))) { + return -EFAULT; + } + + down(&dev->struct_sem); + list_for_each(list, &dev->maplist->head) { + drm_map_list_t *r_list = list_entry(list, drm_map_list_t, head); + + if (r_list->map && + r_list->user_token == (unsigned long) request.handle && + r_list->map->flags & _DRM_REMOVABLE) { + map = r_list->map; + break; + } + } + + /* List has wrapped around to the head pointer, or it's empty and we + * didn't find anything. + */ + if (list == (&dev->maplist->head)) { + up(&dev->struct_sem); + return -EINVAL; + } + + if (!map) { + up(&dev->struct_sem); /* still held on this path */ + return -EINVAL; + } + + /* Register and framebuffer maps are permanent */ + if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) { + up(&dev->struct_sem); + return 0; + } + + ret = drm_rmmap_locked(dev, map); + + up(&dev->struct_sem); + + return ret; +} + +/** + * Cleanup after an error on one of the addbufs() functions. + * + * \param dev DRM device. + * \param entry buffer entry where the error occurred.
+ * + * Frees any pages and buffers associated with the given entry. + */ +static void drm_cleanup_buf_error(drm_device_t * dev, drm_buf_entry_t * entry) +{ + int i; + + if (entry->seg_count) { + for (i = 0; i < entry->seg_count; i++) { + if (entry->seglist[i]) { + drm_free_pages(entry->seglist[i], + entry->page_order, DRM_MEM_DMA); + } + } + drm_free(entry->seglist, + entry->seg_count * + sizeof(*entry->seglist), DRM_MEM_SEGS); + + entry->seg_count = 0; + } + + if (entry->buf_count) { + for (i = 0; i < entry->buf_count; i++) { + if (entry->buflist[i].dev_private) { + drm_free(entry->buflist[i].dev_private, + entry->buflist[i].dev_priv_size, + DRM_MEM_BUFS); + } + } + drm_free(entry->buflist, + entry->buf_count * + sizeof(*entry->buflist), DRM_MEM_BUFS); + + entry->buf_count = 0; + } +} + +#if __OS_HAS_AGP +/** + * Add AGP buffers for DMA transfers + * + * \param dev drm_device_t to which the buffers are to be added. + * \param request pointer to a drm_buf_desc_t describing the request. + * \return zero on success or a negative number on failure. + * + * After some sanity checks creates a drm_buf structure for each buffer and + * reallocates the buffer list of the same size order to accommodate the new + * buffers. + */ +int drm_addbufs_agp(drm_device_t * dev, drm_buf_desc_t * request) +{ + drm_device_dma_t *dma = dev->dma; + drm_buf_entry_t *entry; + drm_agp_mem_t *agp_entry; + drm_buf_t *buf; + unsigned long offset; + unsigned long agp_offset; + int count; + int order; + int size; + int alignment; + int page_order; + int total; + int byte_count; + int i, valid; + drm_buf_t **temp_buflist; + + if (!dma) + return -EINVAL; + + count = request->count; + order = drm_order(request->size); + size = 1 << order; + + alignment = (request->flags & _DRM_PAGE_ALIGN) + ? PAGE_ALIGN(size) : size; + page_order = order - PAGE_SHIFT > 0 ? 
order - PAGE_SHIFT : 0; + total = PAGE_SIZE << page_order; + + byte_count = 0; + agp_offset = dev->agp->base + request->agp_start; + + DRM_DEBUG("count: %d\n", count); + DRM_DEBUG("order: %d\n", order); + DRM_DEBUG("size: %d\n", size); + DRM_DEBUG("agp_offset: %lx\n", agp_offset); + DRM_DEBUG("alignment: %d\n", alignment); + DRM_DEBUG("page_order: %d\n", page_order); + DRM_DEBUG("total: %d\n", total); + + if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) + return -EINVAL; + if (dev->queue_count) + return -EBUSY; /* Not while in use */ + + /* Make sure buffers are located in AGP memory that we own */ + valid = 0; + for (agp_entry = dev->agp->memory; agp_entry; agp_entry = agp_entry->next) { + if ((agp_offset >= agp_entry->bound) && + (agp_offset + total * count <= agp_entry->bound + agp_entry->pages * PAGE_SIZE)) { + valid = 1; + break; + } + } + if (dev->agp->memory && !valid) { + DRM_DEBUG("zone invalid\n"); + return -EINVAL; + } + spin_lock(&dev->count_lock); + if (dev->buf_use) { + spin_unlock(&dev->count_lock); + return -EBUSY; + } + atomic_inc(&dev->buf_alloc); + spin_unlock(&dev->count_lock); + + down(&dev->struct_sem); + entry = &dma->bufs[order]; + if (entry->buf_count) { + up(&dev->struct_sem); + atomic_dec(&dev->buf_alloc); + return -ENOMEM; /* May only call once for each order */ + } + + if (count < 0 || count > 4096) { + up(&dev->struct_sem); + atomic_dec(&dev->buf_alloc); + return -EINVAL; + } + + entry->buflist = drm_alloc(count * sizeof(*entry->buflist), + DRM_MEM_BUFS); + if (!entry->buflist) { + up(&dev->struct_sem); + atomic_dec(&dev->buf_alloc); + return -ENOMEM; + } + memset(entry->buflist, 0, count * sizeof(*entry->buflist)); + + entry->buf_size = size; + entry->page_order = page_order; + + offset = 0; + + while (entry->buf_count < count) { + buf = &entry->buflist[entry->buf_count]; + buf->idx = dma->buf_count + entry->buf_count; + buf->total = alignment; + buf->order = order; + buf->used = 0; + + buf->offset = (dma->byte_count + offset); + buf->bus_address = agp_offset + offset; + buf->address = (void *)(agp_offset + offset); + buf->next = NULL; + buf->waiting = 0; + buf->pending = 0; + init_waitqueue_head(&buf->dma_wait); + buf->filp = NULL; + + buf->dev_priv_size = dev->driver->dev_priv_size; + buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS); + if (!buf->dev_private) { + /* Set count correctly so we free the proper amount. 
*/ + entry->buf_count = count; + drm_cleanup_buf_error(dev, entry); + up(&dev->struct_sem); + atomic_dec(&dev->buf_alloc); + return -ENOMEM; + } + memset(buf->dev_private, 0, buf->dev_priv_size); + + DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address); + + offset += alignment; + entry->buf_count++; + byte_count += PAGE_SIZE << page_order; + } + + DRM_DEBUG("byte_count: %d\n", byte_count); + + temp_buflist = drm_realloc(dma->buflist, + dma->buf_count * sizeof(*dma->buflist), + (dma->buf_count + entry->buf_count) + * sizeof(*dma->buflist), DRM_MEM_BUFS); + if (!temp_buflist) { + /* Free the entry because it isn't valid */ + drm_cleanup_buf_error(dev, entry); + up(&dev->struct_sem); + atomic_dec(&dev->buf_alloc); + return -ENOMEM; + } + dma->buflist = temp_buflist; + + for (i = 0; i < entry->buf_count; i++) { + dma->buflist[i + dma->buf_count] = &entry->buflist[i]; + } + + dma->buf_count += entry->buf_count; + dma->seg_count += entry->seg_count; + dma->page_count += byte_count >> PAGE_SHIFT; + dma->byte_count += byte_count; + + DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count); + DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count); + + up(&dev->struct_sem); + + request->count = entry->buf_count; + request->size = size; + + dma->flags = _DRM_DMA_USE_AGP; + + atomic_dec(&dev->buf_alloc); + return 0; +} +EXPORT_SYMBOL(drm_addbufs_agp); +#endif /* __OS_HAS_AGP */ + +int drm_addbufs_pci(drm_device_t * dev, drm_buf_desc_t * request) +{ + drm_device_dma_t *dma = dev->dma; + int count; + int order; + int size; + int total; + int page_order; + drm_buf_entry_t *entry; + unsigned long page; + drm_buf_t *buf; + int alignment; + unsigned long offset; + int i; + int byte_count; + int page_count; + unsigned long *temp_pagelist; + drm_buf_t **temp_buflist; + + if (!drm_core_check_feature(dev, DRIVER_PCI_DMA)) + return -EINVAL; + + if (!dma) + return -EINVAL; + + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + + count = request->count; + order = drm_order(request->size); + size = 1 << order; + + DRM_DEBUG("count=%d, size=%d (%d), order=%d, queue_count=%d\n", + request->count, request->size, size, order, dev->queue_count); + + if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) + return -EINVAL; + if (dev->queue_count) + return -EBUSY; /* Not while in use */ + + alignment = (request->flags & _DRM_PAGE_ALIGN) + ? PAGE_ALIGN(size) : size; + page_order = order - PAGE_SHIFT > 0 ? 
order - PAGE_SHIFT : 0; + total = PAGE_SIZE << page_order; + + spin_lock(&dev->count_lock); + if (dev->buf_use) { + spin_unlock(&dev->count_lock); + return -EBUSY; + } + atomic_inc(&dev->buf_alloc); + spin_unlock(&dev->count_lock); + + down(&dev->struct_sem); + entry = &dma->bufs[order]; + if (entry->buf_count) { + up(&dev->struct_sem); + atomic_dec(&dev->buf_alloc); + return -ENOMEM; /* May only call once for each order */ + } + + if (count < 0 || count > 4096) { + up(&dev->struct_sem); + atomic_dec(&dev->buf_alloc); + return -EINVAL; + } + + entry->buflist = drm_alloc(count * sizeof(*entry->buflist), + DRM_MEM_BUFS); + if (!entry->buflist) { + up(&dev->struct_sem); + atomic_dec(&dev->buf_alloc); + return -ENOMEM; + } + memset(entry->buflist, 0, count * sizeof(*entry->buflist)); + + entry->seglist = drm_alloc(count * sizeof(*entry->seglist), + DRM_MEM_SEGS); + if (!entry->seglist) { + drm_free(entry->buflist, + count * sizeof(*entry->buflist), DRM_MEM_BUFS); + up(&dev->struct_sem); + atomic_dec(&dev->buf_alloc); + return -ENOMEM; + } + memset(entry->seglist, 0, count * sizeof(*entry->seglist)); + + /* Keep the original pagelist until we know all the allocations + * have succeeded + */ + temp_pagelist = drm_alloc((dma->page_count + (count << page_order)) + * sizeof(*dma->pagelist), DRM_MEM_PAGES); + if (!temp_pagelist) { + drm_free(entry->buflist, + count * sizeof(*entry->buflist), DRM_MEM_BUFS); + drm_free(entry->seglist, + count * sizeof(*entry->seglist), DRM_MEM_SEGS); + up(&dev->struct_sem); + atomic_dec(&dev->buf_alloc); + return -ENOMEM; + } + memcpy(temp_pagelist, + dma->pagelist, dma->page_count * sizeof(*dma->pagelist)); + DRM_DEBUG("pagelist: %d entries\n", + dma->page_count + (count << page_order)); + + entry->buf_size = size; + entry->page_order = page_order; + byte_count = 0; + page_count = 0; + + while (entry->buf_count < count) { + page = drm_alloc_pages(page_order, DRM_MEM_DMA); + if (!page) { + /* Set count correctly so we free the proper amount. */ + entry->buf_count = count; + entry->seg_count = count; + drm_cleanup_buf_error(dev, entry); + drm_free(temp_pagelist, + (dma->page_count + (count << page_order)) + * sizeof(*dma->pagelist), DRM_MEM_PAGES); + up(&dev->struct_sem); + atomic_dec(&dev->buf_alloc); + return -ENOMEM; + } + entry->seglist[entry->seg_count++] = page; + for (i = 0; i < (1 << page_order); i++) { + DRM_DEBUG("page %d @ 0x%08lx\n", + dma->page_count + page_count, + page + PAGE_SIZE * i); + temp_pagelist[dma->page_count + page_count++] + = page + PAGE_SIZE * i; + } + for (offset = 0; + offset + size <= total && entry->buf_count < count; + offset += alignment, ++entry->buf_count) { + buf = &entry->buflist[entry->buf_count]; + buf->idx = dma->buf_count + entry->buf_count; + buf->total = alignment; + buf->order = order; + buf->used = 0; + buf->offset = (dma->byte_count + byte_count + offset); + buf->address = (void *)(page + offset); + buf->next = NULL; + buf->waiting = 0; + buf->pending = 0; + init_waitqueue_head(&buf->dma_wait); + buf->filp = NULL; + + buf->dev_priv_size = dev->driver->dev_priv_size; + buf->dev_private = drm_alloc(dev->driver->dev_priv_size, + DRM_MEM_BUFS); + if (!buf->dev_private) { + /* Set count correctly so we free the proper amount. 
*/ + entry->buf_count = count; + entry->seg_count = count; + drm_cleanup_buf_error(dev, entry); + drm_free(temp_pagelist, + (dma->page_count + + (count << page_order)) + * sizeof(*dma->pagelist), + DRM_MEM_PAGES); + up(&dev->struct_sem); + atomic_dec(&dev->buf_alloc); + return -ENOMEM; + } + memset(buf->dev_private, 0, buf->dev_priv_size); + + DRM_DEBUG("buffer %d @ %p\n", + entry->buf_count, buf->address); + } + byte_count += PAGE_SIZE << page_order; + } + + temp_buflist = drm_realloc(dma->buflist, + dma->buf_count * sizeof(*dma->buflist), + (dma->buf_count + entry->buf_count) + * sizeof(*dma->buflist), DRM_MEM_BUFS); + if (!temp_buflist) { + /* Free the entry because it isn't valid */ + drm_cleanup_buf_error(dev, entry); + drm_free(temp_pagelist, + (dma->page_count + (count << page_order)) + * sizeof(*dma->pagelist), DRM_MEM_PAGES); + up(&dev->struct_sem); + atomic_dec(&dev->buf_alloc); + return -ENOMEM; + } + dma->buflist = temp_buflist; + + for (i = 0; i < entry->buf_count; i++) { + dma->buflist[i + dma->buf_count] = &entry->buflist[i]; + } + + /* No allocations failed, so now we can replace the original pagelist + * with the new one. + */ + if (dma->page_count) { + drm_free(dma->pagelist, + dma->page_count * sizeof(*dma->pagelist), + DRM_MEM_PAGES); + } + dma->pagelist = temp_pagelist; + + dma->buf_count += entry->buf_count; + dma->seg_count += entry->seg_count; + dma->page_count += entry->seg_count << page_order; + dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order); + + up(&dev->struct_sem); + + request->count = entry->buf_count; + request->size = size; + + atomic_dec(&dev->buf_alloc); + return 0; + +} +EXPORT_SYMBOL(drm_addbufs_pci); + +static int drm_addbufs_sg(drm_device_t * dev, drm_buf_desc_t * request) +{ + drm_device_dma_t *dma = dev->dma; + drm_buf_entry_t *entry; + drm_buf_t *buf; + unsigned long offset; + unsigned long agp_offset; + int count; + int order; + int size; + int alignment; + int page_order; + int total; + int byte_count; + int i; + drm_buf_t **temp_buflist; + + if (!drm_core_check_feature(dev, DRIVER_SG)) + return -EINVAL; + + if (!dma) + return -EINVAL; + + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + + count = request->count; + order = drm_order(request->size); + size = 1 << order; + + alignment = (request->flags & _DRM_PAGE_ALIGN) + ? PAGE_ALIGN(size) : size; + page_order = order - PAGE_SHIFT > 0 ?
order - PAGE_SHIFT : 0; + total = PAGE_SIZE << page_order; + + byte_count = 0; + agp_offset = request->agp_start; + + DRM_DEBUG("count: %d\n", count); + DRM_DEBUG("order: %d\n", order); + DRM_DEBUG("size: %d\n", size); + DRM_DEBUG("agp_offset: %lu\n", agp_offset); + DRM_DEBUG("alignment: %d\n", alignment); + DRM_DEBUG("page_order: %d\n", page_order); + DRM_DEBUG("total: %d\n", total); + + if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) + return -EINVAL; + if (dev->queue_count) + return -EBUSY; /* Not while in use */ + + spin_lock(&dev->count_lock); + if (dev->buf_use) { + spin_unlock(&dev->count_lock); + return -EBUSY; + } + atomic_inc(&dev->buf_alloc); + spin_unlock(&dev->count_lock); + + down(&dev->struct_sem); + entry = &dma->bufs[order]; + if (entry->buf_count) { + up(&dev->struct_sem); + atomic_dec(&dev->buf_alloc); + return -ENOMEM; /* May only call once for each order */ + } + + if (count < 0 || count > 4096) { + up(&dev->struct_sem); + atomic_dec(&dev->buf_alloc); + return -EINVAL; + } + + entry->buflist = drm_alloc(count * sizeof(*entry->buflist), + DRM_MEM_BUFS); + if (!entry->buflist) { + up(&dev->struct_sem); + atomic_dec(&dev->buf_alloc); + return -ENOMEM; + } + memset(entry->buflist, 0, count * sizeof(*entry->buflist)); + + entry->buf_size = size; + entry->page_order = page_order; + + offset = 0; + + while (entry->buf_count < count) { + buf = &entry->buflist[entry->buf_count]; + buf->idx = dma->buf_count + entry->buf_count; + buf->total = alignment; + buf->order = order; + buf->used = 0; + + buf->offset = (dma->byte_count + offset); + buf->bus_address = agp_offset + offset; + buf->address = (void *)(agp_offset + offset + + (unsigned long)dev->sg->virtual); + buf->next = NULL; + buf->waiting = 0; + buf->pending = 0; + init_waitqueue_head(&buf->dma_wait); + buf->filp = NULL; + + buf->dev_priv_size = dev->driver->dev_priv_size; + buf->dev_private = drm_alloc(dev->driver->dev_priv_size, + DRM_MEM_BUFS); + if (!buf->dev_private) { + /* Set count correctly so we free the proper amount. 
*/ + entry->buf_count = count; + drm_cleanup_buf_error(dev, entry); + up(&dev->struct_sem); + atomic_dec(&dev->buf_alloc); + return -ENOMEM; + } + + memset(buf->dev_private, 0, buf->dev_priv_size); + + DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address); + + offset += alignment; + entry->buf_count++; + byte_count += PAGE_SIZE << page_order; + } + + DRM_DEBUG("byte_count: %d\n", byte_count); + + temp_buflist = drm_realloc(dma->buflist, + dma->buf_count * sizeof(*dma->buflist), + (dma->buf_count + entry->buf_count) + * sizeof(*dma->buflist), DRM_MEM_BUFS); + if (!temp_buflist) { + /* Free the entry because it isn't valid */ + drm_cleanup_buf_error(dev, entry); + up(&dev->struct_sem); + atomic_dec(&dev->buf_alloc); + return -ENOMEM; + } + dma->buflist = temp_buflist; + + for (i = 0; i < entry->buf_count; i++) { + dma->buflist[i + dma->buf_count] = &entry->buflist[i]; + } + + dma->buf_count += entry->buf_count; + dma->seg_count += entry->seg_count; + dma->page_count += byte_count >> PAGE_SHIFT; + dma->byte_count += byte_count; + + DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count); + DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count); + + up(&dev->struct_sem); + + request->count = entry->buf_count; + request->size = size; + + dma->flags = _DRM_DMA_USE_SG; + + atomic_dec(&dev->buf_alloc); + return 0; +} + + +int drm_addbufs_fb(drm_device_t * dev, drm_buf_desc_t * request) +{ + drm_device_dma_t *dma = dev->dma; + drm_buf_entry_t *entry; + drm_buf_t *buf; + unsigned long offset; + unsigned long agp_offset; + int count; + int order; + int size; + int alignment; + int page_order; + int total; + int byte_count; + int i; + drm_buf_t **temp_buflist; + + if (!drm_core_check_feature(dev, DRIVER_FB_DMA)) + return -EINVAL; + + if (!dma) + return -EINVAL; + + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + + count = request->count; + order = drm_order(request->size); + size = 1 << order; + + alignment = (request->flags & _DRM_PAGE_ALIGN) + ? PAGE_ALIGN(size) : size; + page_order = order - PAGE_SHIFT > 0 ? 
order - PAGE_SHIFT : 0; + total = PAGE_SIZE << page_order; + + byte_count = 0; + agp_offset = request->agp_start; + + DRM_DEBUG("count: %d\n", count); + DRM_DEBUG("order: %d\n", order); + DRM_DEBUG("size: %d\n", size); + DRM_DEBUG("agp_offset: %lu\n", agp_offset); + DRM_DEBUG("alignment: %d\n", alignment); + DRM_DEBUG("page_order: %d\n", page_order); + DRM_DEBUG("total: %d\n", total); + + if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) + return -EINVAL; + if (dev->queue_count) + return -EBUSY; /* Not while in use */ + + spin_lock(&dev->count_lock); + if (dev->buf_use) { + spin_unlock(&dev->count_lock); + return -EBUSY; + } + atomic_inc(&dev->buf_alloc); + spin_unlock(&dev->count_lock); + + down(&dev->struct_sem); + entry = &dma->bufs[order]; + if (entry->buf_count) { + up(&dev->struct_sem); + atomic_dec(&dev->buf_alloc); + return -ENOMEM; /* May only call once for each order */ + } + + if (count < 0 || count > 4096) { + up(&dev->struct_sem); + atomic_dec(&dev->buf_alloc); + return -EINVAL; + } + + entry->buflist = drm_alloc(count * sizeof(*entry->buflist), + DRM_MEM_BUFS); + if (!entry->buflist) { + up(&dev->struct_sem); + atomic_dec(&dev->buf_alloc); + return -ENOMEM; + } + memset(entry->buflist, 0, count * sizeof(*entry->buflist)); + + entry->buf_size = size; + entry->page_order = page_order; + + offset = 0; + + while (entry->buf_count < count) { + buf = &entry->buflist[entry->buf_count]; + buf->idx = dma->buf_count + entry->buf_count; + buf->total = alignment; + buf->order = order; + buf->used = 0; + + buf->offset = (dma->byte_count + offset); + buf->bus_address = agp_offset + offset; + buf->address = (void *)(agp_offset + offset); + buf->next = NULL; + buf->waiting = 0; + buf->pending = 0; + init_waitqueue_head(&buf->dma_wait); + buf->filp = NULL; + + buf->dev_priv_size = dev->driver->dev_priv_size; + buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS); + if (!buf->dev_private) { + /* Set count correctly so we free the proper amount. */ + entry->buf_count = count; + drm_cleanup_buf_error(dev, entry); + up(&dev->struct_sem); + atomic_dec(&dev->buf_alloc); + return -ENOMEM; + } + memset(buf->dev_private, 0, buf->dev_priv_size); + + DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address); + + offset += alignment; + entry->buf_count++; + byte_count += PAGE_SIZE << page_order; + } + + DRM_DEBUG("byte_count: %d\n", byte_count); + + temp_buflist = drm_realloc(dma->buflist, + dma->buf_count * sizeof(*dma->buflist), + (dma->buf_count + entry->buf_count) + * sizeof(*dma->buflist), DRM_MEM_BUFS); + if (!temp_buflist) { + /* Free the entry because it isn't valid */ + drm_cleanup_buf_error(dev, entry); + up(&dev->struct_sem); + atomic_dec(&dev->buf_alloc); + return -ENOMEM; + } + dma->buflist = temp_buflist; + + for (i = 0; i < entry->buf_count; i++) { + dma->buflist[i + dma->buf_count] = &entry->buflist[i]; + } + + dma->buf_count += entry->buf_count; + dma->seg_count += entry->seg_count; + dma->page_count += byte_count >> PAGE_SHIFT; + dma->byte_count += byte_count; + + DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count); + DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count); + + up(&dev->struct_sem); + + request->count = entry->buf_count; + request->size = size; + + dma->flags = _DRM_DMA_USE_FB; + + atomic_dec(&dev->buf_alloc); + return 0; +} +EXPORT_SYMBOL(drm_addbufs_fb); + + +/** + * Add buffers for DMA transfers (ioctl). + * + * \param inode device inode. + * \param filp file pointer. + * \param cmd command. + * \param arg pointer to a drm_buf_desc_t request. 
+ * \return zero on success or a negative number on failure. + * + * According to the memory type specified in drm_buf_desc::flags and the + * build options, it dispatches the call to addbufs_agp(), addbufs_sg(), + * addbufs_fb() or addbufs_pci() for AGP, scatter-gather, framebuffer or + * consistent PCI memory respectively. + */ +int drm_addbufs(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg) +{ + drm_buf_desc_t request; + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->head->dev; + int ret; + + + if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA)) + return -EINVAL; + + if (copy_from_user(&request, (drm_buf_desc_t __user *) arg, + sizeof(request))) + return -EFAULT; + +#if __OS_HAS_AGP + if (request.flags & _DRM_AGP_BUFFER) { + ret = drm_addbufs_agp(dev, & request); + } + else +#endif + if (request.flags & _DRM_SG_BUFFER) + ret = drm_addbufs_sg(dev, & request); + else if (request.flags & _DRM_FB_BUFFER) + ret = drm_addbufs_fb(dev, & request); + else + ret = drm_addbufs_pci(dev, & request); + + if (ret == 0) { + if (copy_to_user( (void __user *) arg, &request, + sizeof(request))) { + ret = -EFAULT; + } + } + + return ret; +} + +/** + * Get information about the buffer mappings. + * + * This was originally meant for debugging purposes, or for use by a + * sophisticated client library to determine how best to use the available + * buffers (e.g., large buffers can be used for image transfer). + * + * \param inode device inode. + * \param filp file pointer. + * \param cmd command. + * \param arg pointer to a drm_buf_info structure. + * \return zero on success or a negative number on failure. + * + * Increments drm_device::buf_use while holding the drm_device::count_lock + * lock, preventing allocation of more buffers after this call. Information + * about each requested buffer is then copied into user space.
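Because the populated-order count is written back into the count field, user space can size its list with two calls. A hedged sketch against the uapi names this code implies (DRM_IOCTL_INFO_BUFS and the struct spellings should be checked against the installed drm.h):

#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/drm.h>    /* assumed install path of the DRM uapi header */

/* Two-pass buffer query: first call with count 0 to learn how many
 * size orders are populated, second call to fetch the descriptors. */
static int query_buf_info(int fd)
{
    drm_buf_info_t req;
    drm_buf_desc_t *list;

    memset(&req, 0, sizeof(req));
    if (ioctl(fd, DRM_IOCTL_INFO_BUFS, &req) < 0)
        return -1;      /* req.count now holds the entry count */
    if (req.count <= 0)
        return 0;       /* no populated orders */

    list = calloc(req.count, sizeof(*list));
    if (!list)
        return -1;
    req.list = list;
    if (ioctl(fd, DRM_IOCTL_INFO_BUFS, &req) < 0) {
        free(list);
        return -1;
    }
    /* ... inspect list[0 .. req.count-1]: count, size, low/high marks ... */
    free(list);
    return 0;
}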
+ */ +int drm_infobufs(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->head->dev; + drm_device_dma_t *dma = dev->dma; + drm_buf_info_t request; + drm_buf_info_t __user *argp = (void __user *)arg; + int i; + int count; + + if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA)) + return -EINVAL; + + if (!dma) + return -EINVAL; + + spin_lock(&dev->count_lock); + if (atomic_read(&dev->buf_alloc)) { + spin_unlock(&dev->count_lock); + return -EBUSY; + } + ++dev->buf_use; /* Can't allocate more after this call */ + spin_unlock(&dev->count_lock); + + if (copy_from_user(&request, argp, sizeof(request))) + return -EFAULT; + + for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) { + if (dma->bufs[i].buf_count) + ++count; + } + + DRM_DEBUG("count = %d\n", count); + + if (request.count >= count) { + for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) { + if (dma->bufs[i].buf_count) { + drm_buf_desc_t __user *to = + &request.list[count]; + drm_buf_entry_t *from = &dma->bufs[i]; + drm_freelist_t *list = &dma->bufs[i].freelist; + if (copy_to_user(&to->count, + &from->buf_count, + sizeof(from->buf_count)) || + copy_to_user(&to->size, + &from->buf_size, + sizeof(from->buf_size)) || + copy_to_user(&to->low_mark, + &list->low_mark, + sizeof(list->low_mark)) || + copy_to_user(&to->high_mark, + &list->high_mark, + sizeof(list->high_mark))) + return -EFAULT; + + DRM_DEBUG("%d %d %d %d %d\n", + i, + dma->bufs[i].buf_count, + dma->bufs[i].buf_size, + dma->bufs[i].freelist.low_mark, + dma->bufs[i].freelist.high_mark); + ++count; + } + } + } + request.count = count; + + if (copy_to_user(argp, &request, sizeof(request))) + return -EFAULT; + + return 0; +} + +/** + * Specifies a low and high water mark for buffer allocation + * + * \param inode device inode. + * \param filp file pointer. + * \param cmd command. + * \param arg a pointer to a drm_buf_desc structure. + * \return zero on success or a negative number on failure. + * + * Verifies that the size order is bounded between the admissible orders and + * updates the respective drm_device_dma::bufs entry low and high water mark. + * + * \note This ioctl is deprecated and mostly never used. + */ +int drm_markbufs(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->head->dev; + drm_device_dma_t *dma = dev->dma; + drm_buf_desc_t request; + int order; + drm_buf_entry_t *entry; + + if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA)) + return -EINVAL; + + if (!dma) + return -EINVAL; + + if (copy_from_user(&request, + (drm_buf_desc_t __user *) arg, sizeof(request))) + return -EFAULT; + + DRM_DEBUG("%d, %d, %d\n", + request.size, request.low_mark, request.high_mark); + order = drm_order(request.size); + if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) + return -EINVAL; + entry = &dma->bufs[order]; + + if (request.low_mark < 0 || request.low_mark > entry->buf_count) + return -EINVAL; + if (request.high_mark < 0 || request.high_mark > entry->buf_count) + return -EINVAL; + + entry->freelist.low_mark = request.low_mark; + entry->freelist.high_mark = request.high_mark; + + return 0; +} + +/** + * Unreserve the buffers in list, previously reserved using drmDMA. + * + * \param inode device inode. + * \param filp file pointer. + * \param cmd command. + * \param arg pointer to a drm_buf_free structure. + * \return zero on success or a negative number on failure. 
+ * + * Calls free_buffer() for each used buffer. + * This function is primarily used for debugging. + */ +int drm_freebufs(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->head->dev; + drm_device_dma_t *dma = dev->dma; + drm_buf_free_t request; + int i; + int idx; + drm_buf_t *buf; + + if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA)) + return -EINVAL; + + if (!dma) + return -EINVAL; + + if (copy_from_user(&request, + (drm_buf_free_t __user *) arg, sizeof(request))) + return -EFAULT; + + DRM_DEBUG("%d\n", request.count); + for (i = 0; i < request.count; i++) { + if (copy_from_user(&idx, &request.list[i], sizeof(idx))) + return -EFAULT; + if (idx < 0 || idx >= dma->buf_count) { + DRM_ERROR("Index %d (of %d max)\n", + idx, dma->buf_count - 1); + return -EINVAL; + } + buf = dma->buflist[idx]; + if (buf->filp != filp) { + DRM_ERROR("Process %d freeing buffer not owned\n", + current->pid); + return -EINVAL; + } + drm_free_buffer(dev, buf); + } + + return 0; +} + +/** + * Maps all of the DMA buffers into client-virtual space (ioctl). + * + * \param inode device inode. + * \param filp file pointer. + * \param cmd command. + * \param arg pointer to a drm_buf_map structure. + * \return zero on success or a negative number on failure. + * + * Maps the AGP or SG buffer region with do_mmap(), and copies information + * about each buffer into user space. The PCI buffers are already mapped on the + * addbufs_pci() call. + */ +int drm_mapbufs(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->head->dev; + drm_device_dma_t *dma = dev->dma; + drm_buf_map_t __user *argp = (void __user *)arg; + int retcode = 0; + const int zero = 0; + unsigned long virtual; + unsigned long address; + drm_buf_map_t request; + int i; + + if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA)) + return -EINVAL; + + if (!dma) + return -EINVAL; + + spin_lock(&dev->count_lock); + if (atomic_read(&dev->buf_alloc)) { + spin_unlock(&dev->count_lock); + return -EBUSY; + } + dev->buf_use++; /* Can't allocate more after this call */ + spin_unlock(&dev->count_lock); + + if (copy_from_user(&request, argp, sizeof(request))) + return -EFAULT; + + if (request.count >= dma->buf_count) { + if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP)) + || (drm_core_check_feature(dev, DRIVER_SG) + && (dma->flags & _DRM_DMA_USE_SG)) + || (drm_core_check_feature(dev, DRIVER_FB_DMA) + && (dma->flags & _DRM_DMA_USE_FB))) { + drm_map_t *map = dev->agp_buffer_map; + unsigned long token = dev->agp_buffer_token; + + if (!map) { + retcode = -EINVAL; + goto done; + } +#if LINUX_VERSION_CODE <= 0x020402 + down(&current->mm->mmap_sem); +#else + down_write(&current->mm->mmap_sem); +#endif + virtual = do_mmap(filp, 0, map->size, + PROT_READ | PROT_WRITE, + MAP_SHARED, + token); +#if LINUX_VERSION_CODE <= 0x020402 + up(&current->mm->mmap_sem); +#else + up_write(&current->mm->mmap_sem); +#endif + } else { +#if LINUX_VERSION_CODE <= 0x020402 + down(&current->mm->mmap_sem); +#else + down_write(&current->mm->mmap_sem); +#endif + virtual = do_mmap(filp, 0, dma->byte_count, + PROT_READ | PROT_WRITE, + MAP_SHARED, 0); +#if LINUX_VERSION_CODE <= 0x020402 + up(&current->mm->mmap_sem); +#else + up_write(&current->mm->mmap_sem); +#endif + } + if (virtual > -1024UL) { + /* Real error */ + retcode = (signed long)virtual; + goto done; + } + request.virtual = (void __user *)virtual; + + for (i = 0; i < dma->buf_count; i++) {
+			if (copy_to_user(&request.list[i].idx,
+					 &dma->buflist[i]->idx,
+					 sizeof(request.list[0].idx))) {
+				retcode = -EFAULT;
+				goto done;
+			}
+			if (copy_to_user(&request.list[i].total,
+					 &dma->buflist[i]->total,
+					 sizeof(request.list[0].total))) {
+				retcode = -EFAULT;
+				goto done;
+			}
+			if (copy_to_user(&request.list[i].used,
+					 &zero, sizeof(zero))) {
+				retcode = -EFAULT;
+				goto done;
+			}
+			address = virtual + dma->buflist[i]->offset;	/* *** */
+			if (copy_to_user(&request.list[i].address,
+					 &address, sizeof(address))) {
+				retcode = -EFAULT;
+				goto done;
+			}
+		}
+	}
+      done:
+	request.count = dma->buf_count;
+	DRM_DEBUG("%d buffers, retcode = %d\n", request.count, retcode);
+
+	if (copy_to_user(argp, &request, sizeof(request)))
+		return -EFAULT;
+
+	return retcode;
+}
+
+/**
+ * Compute size order. Returns the exponent of the smallest power of two which
+ * is greater than or equal to the given number.
+ *
+ * \param size size.
+ * \return order.
+ *
+ * \todo Can be made faster.
+ */
+int drm_order( unsigned long size )
+{
+	int order;
+	unsigned long tmp;
+
+	for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++)
+		;
+
+	if (size & (size - 1))
+		++order;
+
+	return order;
+}
+EXPORT_SYMBOL(drm_order);
+
diff --git a/nx-X11/extras/drm/linux-core/drm_compat.h b/nx-X11/extras/drm/linux-core/drm_compat.h
new file mode 100644
index 000000000..46d6c0e65
--- /dev/null
+++ b/nx-X11/extras/drm/linux-core/drm_compat.h
@@ -0,0 +1,212 @@
+/**
+ * \file drm_compat.h
+ * Backward compatibility definitions for Direct Rendering Manager
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
+ */
+
+/*
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */ + +#ifndef _DRM_COMPAT_H_ +#define _DRM_COMPAT_H_ + +#ifndef minor +#define minor(x) MINOR((x)) +#endif + +#ifndef MODULE_LICENSE +#define MODULE_LICENSE(x) +#endif + +#ifndef preempt_disable +#define preempt_disable() +#define preempt_enable() +#endif + +#ifndef pte_offset_map +#define pte_offset_map pte_offset +#define pte_unmap(pte) +#endif + +#ifndef module_param +#define module_param(name, type, perm) +#endif + +#ifndef list_for_each_safe +#define list_for_each_safe(pos, n, head) \ + for (pos = (head)->next, n = pos->next; pos != (head); \ + pos = n, n = pos->next) +#endif + +#ifndef list_for_each_entry +#define list_for_each_entry(pos, head, member) \ + for (pos = list_entry((head)->next, typeof(*pos), member), \ + prefetch(pos->member.next); \ + &pos->member != (head); \ + pos = list_entry(pos->member.next, typeof(*pos), member), \ + prefetch(pos->member.next)) +#endif + +#ifndef list_for_each_entry_safe +#define list_for_each_entry_safe(pos, n, head, member) \ + for (pos = list_entry((head)->next, typeof(*pos), member), \ + n = list_entry(pos->member.next, typeof(*pos), member); \ + &pos->member != (head); \ + pos = n, n = list_entry(n->member.next, typeof(*n), member)) +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,19) +static inline struct page *vmalloc_to_page(void *vmalloc_addr) +{ + unsigned long addr = (unsigned long)vmalloc_addr; + struct page *page = NULL; + pgd_t *pgd = pgd_offset_k(addr); + pmd_t *pmd; + pte_t *ptep, pte; + + if (!pgd_none(*pgd)) { + pmd = pmd_offset(pgd, addr); + if (!pmd_none(*pmd)) { + preempt_disable(); + ptep = pte_offset_map(pmd, addr); + pte = *ptep; + if (pte_present(pte)) + page = pte_page(pte); + pte_unmap(ptep); + preempt_enable(); + } + } + return page; +} +#endif + +#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,4,2) +#define down_write down +#define up_write up +#endif + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) +#define DRM_PCI_DEV(pdev) &pdev->dev +#else +#define DRM_PCI_DEV(pdev) NULL +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) +static inline unsigned iminor(struct inode *inode) +{ + return MINOR(inode->i_rdev); +} + +#define old_encode_dev(x) (x) + +struct drm_sysfs_class; +struct class_simple; +struct device; + +#define pci_dev_put(x) do {} while (0) +#define pci_get_subsys pci_find_subsys + +static inline struct class_device *DRM(sysfs_device_add) (struct drm_sysfs_class + * cs, dev_t dev, + struct device * + device, + const char *fmt, + ...) 
{ + return NULL; +} + +static inline void DRM(sysfs_device_remove) (dev_t dev) { +} + +static inline void DRM(sysfs_destroy) (struct drm_sysfs_class * cs) { +} + +static inline struct drm_sysfs_class *DRM(sysfs_create) (struct module * owner, + char *name) { + return NULL; +} + +#ifndef pci_pretty_name +#define pci_pretty_name(x) x->name +#endif + +struct drm_device; +static inline int radeon_create_i2c_busses(struct drm_device *dev) +{ + return 0; +}; +static inline void radeon_delete_i2c_busses(struct drm_device *dev) +{ +}; + +#endif + +#ifndef __user +#define __user +#endif + +#ifndef __put_page +#define __put_page(p) atomic_dec(&(p)->count) +#endif + +#ifndef REMAP_PAGE_RANGE_5_ARGS +#define DRM_RPR_ARG(vma) +#else +#define DRM_RPR_ARG(vma) vma, +#endif + +#define VM_OFFSET(vma) ((vma)->vm_pgoff << PAGE_SHIFT) + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) +static inline int remap_pfn_range(struct vm_area_struct *vma, unsigned long from, unsigned long pfn, unsigned long size, pgprot_t pgprot) +{ + return remap_page_range(DRM_RPR_ARG(vma) from, + pfn << PAGE_SHIFT, + size, + pgprot); +} +#endif + +/* old architectures */ +#ifdef __AMD64__ +#define __x86_64__ +#endif + +#ifndef pci_pretty_name +#define pci_pretty_name(dev) "" +#endif + +/* sysfs __ATTR macro */ +#ifndef __ATTR +#define __ATTR(_name,_mode,_show,_store) { \ + .attr = {.name = __stringify(_name), .mode = _mode, .owner = THIS_MODULE }, \ + .show = _show, \ + .store = _store, \ +} +#endif + +#endif diff --git a/nx-X11/extras/drm/linux-core/drm_context.c b/nx-X11/extras/drm/linux-core/drm_context.c new file mode 100644 index 000000000..5947c8e6d --- /dev/null +++ b/nx-X11/extras/drm/linux-core/drm_context.c @@ -0,0 +1,590 @@ +/** + * \file drm_context.c + * IOCTLs for generic contexts + * + * \author Rickard E. (Rik) Faith <faith@valinux.com> + * \author Gareth Hughes <gareth@valinux.com> + */ + +/* + * Created: Fri Nov 24 18:31:37 2000 by gareth@valinux.com + * + * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas. + * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +/* + * ChangeLog: + * 2001-11-16 Torsten Duwe <duwe@caldera.de> + * added context constructor/destructor hooks, + * needed by SiS driver's memory management. 
+ */
+
+#include "drmP.h"
+
+/******************************************************************/
+/** \name Context bitmap support */
+/*@{*/
+
+/**
+ * Free a handle from the context bitmap.
+ *
+ * \param dev DRM device.
+ * \param ctx_handle context handle.
+ *
+ * Clears the bit specified by \p ctx_handle in drm_device::ctx_bitmap and the entry
+ * in drm_device::context_sareas, while holding the drm_device::struct_sem
+ * lock.
+ */
+void drm_ctxbitmap_free(drm_device_t * dev, int ctx_handle)
+{
+	if (ctx_handle < 0)
+		goto failed;
+	if (!dev->ctx_bitmap)
+		goto failed;
+
+	if (ctx_handle < DRM_MAX_CTXBITMAP) {
+		down(&dev->struct_sem);
+		clear_bit(ctx_handle, dev->ctx_bitmap);
+		dev->context_sareas[ctx_handle] = NULL;
+		up(&dev->struct_sem);
+		return;
+	}
+      failed:
+	DRM_ERROR("Attempt to free invalid context handle: %d\n", ctx_handle);
+	return;
+}
+
+/**
+ * Context bitmap allocation.
+ *
+ * \param dev DRM device.
+ * \return (non-negative) context handle on success or a negative number on failure.
+ *
+ * Finds the first zero bit in drm_device::ctx_bitmap and (re)allocates
+ * drm_device::context_sareas to accommodate the new entry while holding the
+ * drm_device::struct_sem lock.
+ */
+static int drm_ctxbitmap_next(drm_device_t * dev)
+{
+	int bit;
+
+	if (!dev->ctx_bitmap)
+		return -1;
+
+	down(&dev->struct_sem);
+	bit = find_first_zero_bit(dev->ctx_bitmap, DRM_MAX_CTXBITMAP);
+	if (bit < DRM_MAX_CTXBITMAP) {
+		set_bit(bit, dev->ctx_bitmap);
+		DRM_DEBUG("drm_ctxbitmap_next bit : %d\n", bit);
+		if ((bit + 1) > dev->max_context) {
+			dev->max_context = (bit + 1);
+			if (dev->context_sareas) {
+				drm_map_t **ctx_sareas;
+
+				ctx_sareas = drm_realloc(dev->context_sareas,
+							 (dev->max_context - 1)
+							 * sizeof(*dev->context_sareas),
+							 dev->max_context
+							 * sizeof(*dev->context_sareas),
+							 DRM_MEM_MAPS);
+				if (!ctx_sareas) {
+					clear_bit(bit, dev->ctx_bitmap);
+					up(&dev->struct_sem);
+					return -1;
+				}
+				dev->context_sareas = ctx_sareas;
+				dev->context_sareas[bit] = NULL;
+			} else {
+				/* max_context == 1 at this point */
+				dev->context_sareas =
+				    drm_alloc(dev->max_context
+					      * sizeof(*dev->context_sareas),
+					      DRM_MEM_MAPS);
+				if (!dev->context_sareas) {
+					clear_bit(bit, dev->ctx_bitmap);
+					up(&dev->struct_sem);
+					return -1;
+				}
+				dev->context_sareas[bit] = NULL;
+			}
+		}
+		up(&dev->struct_sem);
+		return bit;
+	}
+	up(&dev->struct_sem);
+	return -1;
+}
+
+/**
+ * Context bitmap initialization.
+ *
+ * \param dev DRM device.
+ *
+ * Allocates and initializes drm_device::ctx_bitmap and drm_device::context_sareas, while holding
+ * the drm_device::struct_sem lock.
+ */
+int drm_ctxbitmap_init(drm_device_t * dev)
+{
+	int i;
+	int temp;
+
+	down(&dev->struct_sem);
+	dev->ctx_bitmap = (unsigned long *)drm_alloc(PAGE_SIZE,
+						     DRM_MEM_CTXBITMAP);
+	if (dev->ctx_bitmap == NULL) {
+		up(&dev->struct_sem);
+		return -ENOMEM;
+	}
+	memset((void *)dev->ctx_bitmap, 0, PAGE_SIZE);
+	dev->context_sareas = NULL;
+	dev->max_context = -1;
+	up(&dev->struct_sem);
+
+	for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) {
+		temp = drm_ctxbitmap_next(dev);
+		DRM_DEBUG("drm_ctxbitmap_init : %d\n", temp);
+	}
+
+	return 0;
+}
+
+/**
+ * Context bitmap cleanup.
+ *
+ * \param dev DRM device.
+ *
+ * Frees drm_device::ctx_bitmap and drm_device::context_sareas, while holding
+ * the drm_device::struct_sem lock.
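+ *
+ * A minimal usage sketch (editor's addition, assuming the usual driver
+ * setup/teardown pairing; error handling elided). drm_ctxbitmap_init()
+ * also reserves the first DRM_RESERVED_CONTEXTS handles:
+ * \code
+ * if (drm_ctxbitmap_init(dev))
+ *         return -ENOMEM;
+ * ...
+ * drm_ctxbitmap_cleanup(dev);
+ * \endcode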
+ */
+void drm_ctxbitmap_cleanup(drm_device_t * dev)
+{
+	down(&dev->struct_sem);
+	if (dev->context_sareas)
+		drm_free(dev->context_sareas,
+			 sizeof(*dev->context_sareas) *
+			 dev->max_context, DRM_MEM_MAPS);
+	drm_free((void *)dev->ctx_bitmap, PAGE_SIZE, DRM_MEM_CTXBITMAP);
+	up(&dev->struct_sem);
+}
+
+/*@}*/
+
+/******************************************************************/
+/** \name Per Context SAREA Support */
+/*@{*/
+
+/**
+ * Get per-context SAREA.
+ *
+ * \param inode device inode.
+ * \param filp file pointer.
+ * \param cmd command.
+ * \param arg user argument pointing to a drm_ctx_priv_map structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Gets the map from drm_device::context_sareas with the specified handle and
+ * returns its user-space handle.
+ */
+int drm_getsareactx(struct inode *inode, struct file *filp,
+		    unsigned int cmd, unsigned long arg)
+{
+	drm_file_t *priv = filp->private_data;
+	drm_device_t *dev = priv->head->dev;
+	drm_ctx_priv_map_t __user *argp = (void __user *)arg;
+	drm_ctx_priv_map_t request;
+	drm_map_t *map;
+	drm_map_list_t *_entry;
+
+	if (copy_from_user(&request, argp, sizeof(request)))
+		return -EFAULT;
+
+	down(&dev->struct_sem);
+	if (dev->max_context < 0
+	    || request.ctx_id >= (unsigned)dev->max_context) {
+		up(&dev->struct_sem);
+		return -EINVAL;
+	}
+
+	map = dev->context_sareas[request.ctx_id];
+	up(&dev->struct_sem);
+
+	request.handle = 0;
+	list_for_each_entry(_entry, &dev->maplist->head, head) {
+		if (_entry->map == map) {
+			request.handle =
+			    (void *)(unsigned long)_entry->user_token;
+			break;
+		}
+	}
+	if (request.handle == 0)
+		return -EINVAL;
+
+	if (copy_to_user(argp, &request, sizeof(request)))
+		return -EFAULT;
+	return 0;
+}
+
+/**
+ * Set per-context SAREA.
+ *
+ * \param inode device inode.
+ * \param filp file pointer.
+ * \param cmd command.
+ * \param arg user argument pointing to a drm_ctx_priv_map structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Searches for the mapping specified in \p arg and updates the entry in
+ * drm_device::context_sareas with it.
+ */
+int drm_setsareactx(struct inode *inode, struct file *filp,
+		    unsigned int cmd, unsigned long arg)
+{
+	drm_file_t *priv = filp->private_data;
+	drm_device_t *dev = priv->head->dev;
+	drm_ctx_priv_map_t request;
+	drm_map_t *map = NULL;
+	drm_map_list_t *r_list = NULL;
+	struct list_head *list;
+
+	if (copy_from_user(&request,
+			   (drm_ctx_priv_map_t __user *) arg, sizeof(request)))
+		return -EFAULT;
+
+	down(&dev->struct_sem);
+	list_for_each(list, &dev->maplist->head) {
+		r_list = list_entry(list, drm_map_list_t, head);
+		if (r_list->map
+		    && r_list->user_token == (unsigned long) request.handle)
+			goto found;
+	}
+      bad:
+	up(&dev->struct_sem);
+	return -EINVAL;
+
+      found:
+	map = r_list->map;
+	if (!map)
+		goto bad;
+	if (dev->max_context < 0)
+		goto bad;
+	if (request.ctx_id >= (unsigned)dev->max_context)
+		goto bad;
+	dev->context_sareas[request.ctx_id] = map;
+	up(&dev->struct_sem);
+	return 0;
+}
+
+/*@}*/
+
+/******************************************************************/
+/** \name The actual DRM context handling routines */
+/*@{*/
+
+/**
+ * Switch context.
+ *
+ * \param dev DRM device.
+ * \param old old context handle.
+ * \param new new context handle.
+ * \return zero on success or a negative number on failure.
+ *
+ * Attempts to set drm_device::context_flag.
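+ *
+ * (Editor's note: when \p new equals drm_device::last_context the
+ * switch is treated as a no-op and the flag is cleared right away;
+ * otherwise the flag stays set until drm_context_switch_complete()
+ * clears it.)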
+ */ +static int drm_context_switch(drm_device_t * dev, int old, int new) +{ + if (test_and_set_bit(0, &dev->context_flag)) { + DRM_ERROR("Reentering -- FIXME\n"); + return -EBUSY; + } + + DRM_DEBUG("Context switch from %d to %d\n", old, new); + + if (new == dev->last_context) { + clear_bit(0, &dev->context_flag); + return 0; + } + + return 0; +} + +/** + * Complete context switch. + * + * \param dev DRM device. + * \param new new context handle. + * \return zero on success or a negative number on failure. + * + * Updates drm_device::last_context and drm_device::last_switch. Verifies the + * hardware lock is held, clears the drm_device::context_flag and wakes up + * drm_device::context_wait. + */ +int drm_context_switch_complete(drm_device_t * dev, int new) +{ + dev->last_context = new; /* PRE/POST: This is the _only_ writer. */ + dev->last_switch = jiffies; + + if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) { + DRM_ERROR("Lock isn't held after context switch\n"); + } + + /* If a context switch is ever initiated + when the kernel holds the lock, release + that lock here. */ + clear_bit(0, &dev->context_flag); + wake_up(&dev->context_wait); + + return 0; +} + +/** + * Reserve contexts. + * + * \param inode device inode. + * \param filp file pointer. + * \param cmd command. + * \param arg user argument pointing to a drm_ctx_res structure. + * \return zero on success or a negative number on failure. + */ +int drm_resctx(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg) +{ + drm_ctx_res_t res; + drm_ctx_t __user *argp = (void __user *)arg; + drm_ctx_t ctx; + int i; + + if (copy_from_user(&res, argp, sizeof(res))) + return -EFAULT; + + if (res.count >= DRM_RESERVED_CONTEXTS) { + memset(&ctx, 0, sizeof(ctx)); + for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) { + ctx.handle = i; + if (copy_to_user(&res.contexts[i], &ctx, sizeof(ctx))) + return -EFAULT; + } + } + res.count = DRM_RESERVED_CONTEXTS; + + if (copy_to_user(argp, &res, sizeof(res))) + return -EFAULT; + return 0; +} + +/** + * Add context. + * + * \param inode device inode. + * \param filp file pointer. + * \param cmd command. + * \param arg user argument pointing to a drm_ctx structure. + * \return zero on success or a negative number on failure. + * + * Get a new handle for the context and copy to userspace. + */ +int drm_addctx(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->head->dev; + drm_ctx_list_t *ctx_entry; + drm_ctx_t __user *argp = (void __user *)arg; + drm_ctx_t ctx; + + if (copy_from_user(&ctx, argp, sizeof(ctx))) + return -EFAULT; + + ctx.handle = drm_ctxbitmap_next(dev); + if (ctx.handle == DRM_KERNEL_CONTEXT) { + /* Skip kernel's context and get a new one. */ + ctx.handle = drm_ctxbitmap_next(dev); + } + DRM_DEBUG("%d\n", ctx.handle); + if (ctx.handle == -1) { + DRM_DEBUG("Not enough free contexts.\n"); + /* Should this return -EBUSY instead? 
*/ + return -ENOMEM; + } + + if (ctx.handle != DRM_KERNEL_CONTEXT) { + if (dev->driver->context_ctor) + dev->driver->context_ctor(dev, ctx.handle); + } + + ctx_entry = drm_alloc(sizeof(*ctx_entry), DRM_MEM_CTXLIST); + if (!ctx_entry) { + DRM_DEBUG("out of memory\n"); + return -ENOMEM; + } + + INIT_LIST_HEAD(&ctx_entry->head); + ctx_entry->handle = ctx.handle; + ctx_entry->tag = priv; + + down(&dev->ctxlist_sem); + list_add(&ctx_entry->head, &dev->ctxlist->head); + ++dev->ctx_count; + up(&dev->ctxlist_sem); + + if (copy_to_user(argp, &ctx, sizeof(ctx))) + return -EFAULT; + return 0; +} + +int drm_modctx(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg) +{ + /* This does nothing */ + return 0; +} + +/** + * Get context. + * + * \param inode device inode. + * \param filp file pointer. + * \param cmd command. + * \param arg user argument pointing to a drm_ctx structure. + * \return zero on success or a negative number on failure. + */ +int drm_getctx(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg) +{ + drm_ctx_t __user *argp = (void __user *)arg; + drm_ctx_t ctx; + + if (copy_from_user(&ctx, argp, sizeof(ctx))) + return -EFAULT; + + /* This is 0, because we don't handle any context flags */ + ctx.flags = 0; + + if (copy_to_user(argp, &ctx, sizeof(ctx))) + return -EFAULT; + return 0; +} + +/** + * Switch context. + * + * \param inode device inode. + * \param filp file pointer. + * \param cmd command. + * \param arg user argument pointing to a drm_ctx structure. + * \return zero on success or a negative number on failure. + * + * Calls context_switch(). + */ +int drm_switchctx(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->head->dev; + drm_ctx_t ctx; + + if (copy_from_user(&ctx, (drm_ctx_t __user *) arg, sizeof(ctx))) + return -EFAULT; + + DRM_DEBUG("%d\n", ctx.handle); + return drm_context_switch(dev, dev->last_context, ctx.handle); +} + +/** + * New context. + * + * \param inode device inode. + * \param filp file pointer. + * \param cmd command. + * \param arg user argument pointing to a drm_ctx structure. + * \return zero on success or a negative number on failure. + * + * Calls context_switch_complete(). + */ +int drm_newctx(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->head->dev; + drm_ctx_t ctx; + + if (copy_from_user(&ctx, (drm_ctx_t __user *) arg, sizeof(ctx))) + return -EFAULT; + + DRM_DEBUG("%d\n", ctx.handle); + drm_context_switch_complete(dev, ctx.handle); + + return 0; +} + +/** + * Remove context. + * + * \param inode device inode. + * \param filp file pointer. + * \param cmd command. + * \param arg user argument pointing to a drm_ctx structure. + * \return zero on success or a negative number on failure. + * + * If not the special kernel context, calls ctxbitmap_free() to free the specified context. 
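+ *
+ * A hedged user-space sketch (editor's addition, not part of the
+ * original source; assumes an open, privileged master DRM file
+ * descriptor \c fd and a previously obtained \c handle):
+ * \code
+ * drm_ctx_t ctx = { .handle = handle };
+ * if (ioctl(fd, DRM_IOCTL_RM_CTX, &ctx) < 0)
+ *         perror("DRM_IOCTL_RM_CTX");
+ * \endcode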
+ */ +int drm_rmctx(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->head->dev; + drm_ctx_t ctx; + + if (copy_from_user(&ctx, (drm_ctx_t __user *) arg, sizeof(ctx))) + return -EFAULT; + + DRM_DEBUG("%d\n", ctx.handle); + if (ctx.handle == DRM_KERNEL_CONTEXT + 1) { + priv->remove_auth_on_close = 1; + } + if (ctx.handle != DRM_KERNEL_CONTEXT) { + if (dev->driver->context_dtor) + dev->driver->context_dtor(dev, ctx.handle); + drm_ctxbitmap_free(dev, ctx.handle); + } + + down(&dev->ctxlist_sem); + if (!list_empty(&dev->ctxlist->head)) { + drm_ctx_list_t *pos, *n; + + list_for_each_entry_safe(pos, n, &dev->ctxlist->head, head) { + if (pos->handle == ctx.handle) { + list_del(&pos->head); + drm_free(pos, sizeof(*pos), DRM_MEM_CTXLIST); + --dev->ctx_count; + } + } + } + up(&dev->ctxlist_sem); + + return 0; +} + +/*@}*/ diff --git a/nx-X11/extras/drm/linux-core/drm_core.h b/nx-X11/extras/drm/linux-core/drm_core.h new file mode 100644 index 000000000..a9a851b7b --- /dev/null +++ b/nx-X11/extras/drm/linux-core/drm_core.h @@ -0,0 +1,37 @@ +/* + * Copyright 2004 Jon Smirl <jonsmirl@gmail.com> + * Copyright 1998-2003 VIA Technologies, Inc. All Rights Reserved. + * Copyright 2001-2003 S3 Graphics, Inc. All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sub license, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * VIA, S3 GRAPHICS, AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#define CORE_AUTHOR "Gareth Hughes, Leif Delgass, José Fonseca, Jon Smirl" + +#define CORE_NAME "drm" +#define CORE_DESC "DRM shared core routines" +#define CORE_DATE "20051102" + +#define DRM_IF_MAJOR 1 +#define DRM_IF_MINOR 2 + +#define CORE_MAJOR 1 +#define CORE_MINOR 0 +#define CORE_PATCHLEVEL 1 diff --git a/nx-X11/extras/drm/linux-core/drm_dma.c b/nx-X11/extras/drm/linux-core/drm_dma.c new file mode 100644 index 000000000..e7d9e8260 --- /dev/null +++ b/nx-X11/extras/drm/linux-core/drm_dma.c @@ -0,0 +1,180 @@ +/** + * \file drm_dma.c + * DMA IOCTL and function support + * + * \author Rickard E. (Rik) Faith <faith@valinux.com> + * \author Gareth Hughes <gareth@valinux.com> + */ + +/* + * Created: Fri Mar 19 14:30:16 1999 by faith@valinux.com + * + * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas. + * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. + * All Rights Reserved. 
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "drmP.h"
+
+/**
+ * Initialize the DMA data.
+ *
+ * \param dev DRM device.
+ * \return zero on success or a negative value on failure.
+ *
+ * Allocate and initialize a drm_device_dma structure.
+ */
+int drm_dma_setup(drm_device_t * dev)
+{
+	int i;
+
+	dev->dma = drm_alloc(sizeof(*dev->dma), DRM_MEM_DRIVER);
+	if (!dev->dma)
+		return -ENOMEM;
+
+	memset(dev->dma, 0, sizeof(*dev->dma));
+
+	for (i = 0; i <= DRM_MAX_ORDER; i++)
+		memset(&dev->dma->bufs[i], 0, sizeof(dev->dma->bufs[0]));
+
+	return 0;
+}
+
+/**
+ * Cleanup the DMA resources.
+ *
+ * \param dev DRM device.
+ *
+ * Free all pages associated with DMA buffers, the buffers and pages lists, and
+ * finally the drm_device::dma structure itself.
+ */
+void drm_dma_takedown(drm_device_t * dev)
+{
+	drm_device_dma_t *dma = dev->dma;
+	int i, j;
+
+	if (!dma)
+		return;
+
+	/* Clear dma buffers */
+	for (i = 0; i <= DRM_MAX_ORDER; i++) {
+		if (dma->bufs[i].seg_count) {
+			DRM_DEBUG("order %d: buf_count = %d,"
+				  " seg_count = %d\n",
+				  i,
+				  dma->bufs[i].buf_count,
+				  dma->bufs[i].seg_count);
+			for (j = 0; j < dma->bufs[i].seg_count; j++) {
+				if (dma->bufs[i].seglist[j]) {
+					drm_free_pages(dma->bufs[i].seglist[j],
+						       dma->bufs[i].page_order,
+						       DRM_MEM_DMA);
+				}
+			}
+			drm_free(dma->bufs[i].seglist,
+				 dma->bufs[i].seg_count
+				 * sizeof(*dma->bufs[0].seglist), DRM_MEM_SEGS);
+		}
+		if (dma->bufs[i].buf_count) {
+			for (j = 0; j < dma->bufs[i].buf_count; j++) {
+				if (dma->bufs[i].buflist[j].dev_private) {
+					drm_free(dma->bufs[i].buflist[j].dev_private,
+						 dma->bufs[i].buflist[j].dev_priv_size,
+						 DRM_MEM_BUFS);
+				}
+			}
+			drm_free(dma->bufs[i].buflist,
+				 dma->bufs[i].buf_count *
+				 sizeof(*dma->bufs[0].buflist), DRM_MEM_BUFS);
+		}
+	}
+
+	if (dma->buflist) {
+		drm_free(dma->buflist,
+			 dma->buf_count * sizeof(*dma->buflist), DRM_MEM_BUFS);
+	}
+
+	if (dma->pagelist) {
+		drm_free(dma->pagelist,
+			 dma->page_count * sizeof(*dma->pagelist),
+			 DRM_MEM_PAGES);
+	}
+	drm_free(dev->dma, sizeof(*dev->dma), DRM_MEM_DRIVER);
+	dev->dma = NULL;
+}
+
+/**
+ * Free a buffer.
+ *
+ * \param dev DRM device.
+ * \param buf buffer to free.
+ *
+ * Resets the fields of \p buf.
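+ *
+ * Typical caller pattern (editor's addition; this mirrors
+ * drm_core_reclaim_buffers() below):
+ * \code
+ * if (dma->buflist[i]->filp == filp)
+ *         drm_free_buffer(dev, dma->buflist[i]);
+ * \endcode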
+ */ +void drm_free_buffer(drm_device_t * dev, drm_buf_t * buf) +{ + if (!buf) + return; + + buf->waiting = 0; + buf->pending = 0; + buf->filp = NULL; + buf->used = 0; + + if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE) + && waitqueue_active(&buf->dma_wait)) { + wake_up_interruptible(&buf->dma_wait); + } +} + +/** + * Reclaim the buffers. + * + * \param filp file pointer. + * + * Frees each buffer associated with \p filp not already on the hardware. + */ +void drm_core_reclaim_buffers(drm_device_t *dev, struct file *filp) +{ + drm_device_dma_t *dma = dev->dma; + int i; + + if (!dma) + return; + for (i = 0; i < dma->buf_count; i++) { + if (dma->buflist[i]->filp == filp) { + switch (dma->buflist[i]->list) { + case DRM_LIST_NONE: + drm_free_buffer(dev, dma->buflist[i]); + break; + case DRM_LIST_WAIT: + dma->buflist[i]->list = DRM_LIST_RECLAIM; + break; + default: + /* Buffer already on hardware. */ + break; + } + } + } +} +EXPORT_SYMBOL(drm_core_reclaim_buffers); diff --git a/nx-X11/extras/drm/linux-core/drm_drawable.c b/nx-X11/extras/drm/linux-core/drm_drawable.c new file mode 100644 index 000000000..7857453c4 --- /dev/null +++ b/nx-X11/extras/drm/linux-core/drm_drawable.c @@ -0,0 +1,56 @@ +/** + * \file drm_drawable.c + * IOCTLs for drawables + * + * \author Rickard E. (Rik) Faith <faith@valinux.com> + * \author Gareth Hughes <gareth@valinux.com> + */ + +/* + * Created: Tue Feb 2 08:37:54 1999 by faith@valinux.com + * + * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. + * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#include "drmP.h" + +/** No-op. */ +int drm_adddraw(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg) +{ + drm_draw_t draw; + + draw.handle = 0; /* NOOP */ + DRM_DEBUG("%d\n", draw.handle); + if (copy_to_user((drm_draw_t __user *) arg, &draw, sizeof(draw))) + return -EFAULT; + return 0; +} + +/** No-op. */ +int drm_rmdraw(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg) +{ + return 0; /* NOOP */ +} diff --git a/nx-X11/extras/drm/linux-core/drm_drv.c b/nx-X11/extras/drm/linux-core/drm_drv.c new file mode 100644 index 000000000..1e711c087 --- /dev/null +++ b/nx-X11/extras/drm/linux-core/drm_drv.c @@ -0,0 +1,564 @@ +/** + * \file drm_drv.c + * Generic driver template + * + * \author Rickard E. 
(Rik) Faith <faith@valinux.com> + * \author Gareth Hughes <gareth@valinux.com> + * + * To use this template, you must at least define the following (samples + * given for the MGA driver): + * + * \code + * #define DRIVER_AUTHOR "VA Linux Systems, Inc." + * + * #define DRIVER_NAME "mga" + * #define DRIVER_DESC "Matrox G200/G400" + * #define DRIVER_DATE "20001127" + * + * #define DRIVER_IOCTL_COUNT DRM_ARRAY_SIZE( mga_ioctls ) + * + * #define drm_x mga_##x + * \endcode + */ + +/* + * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com + * + * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas. + * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ */ +#include "drmP.h" +#include "drm_core.h" + +static void __exit drm_cleanup(drm_device_t * dev); +int drm_fb_loaded = 0; + +static int drm_version(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg); + +/** Ioctl table */ +drm_ioctl_desc_t drm_ioctls[] = { + [DRM_IOCTL_NR(DRM_IOCTL_VERSION)] = {drm_version, 0}, + [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE)] = {drm_getunique, 0}, + [DRM_IOCTL_NR(DRM_IOCTL_GET_MAGIC)] = {drm_getmagic, 0}, + [DRM_IOCTL_NR(DRM_IOCTL_IRQ_BUSID)] = {drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY}, + [DRM_IOCTL_NR(DRM_IOCTL_GET_MAP)] = {drm_getmap, 0}, + [DRM_IOCTL_NR(DRM_IOCTL_GET_CLIENT)] = {drm_getclient, 0}, + [DRM_IOCTL_NR(DRM_IOCTL_GET_STATS)] = {drm_getstats, 0}, + [DRM_IOCTL_NR(DRM_IOCTL_SET_VERSION)] = {drm_setversion, DRM_MASTER|DRM_ROOT_ONLY}, + + [DRM_IOCTL_NR(DRM_IOCTL_SET_UNIQUE)] = {drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, + [DRM_IOCTL_NR(DRM_IOCTL_BLOCK)] = {drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, + [DRM_IOCTL_NR(DRM_IOCTL_UNBLOCK)] = {drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, + [DRM_IOCTL_NR(DRM_IOCTL_AUTH_MAGIC)] = {drm_authmagic, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, + + [DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP)] = {drm_addmap_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, + [DRM_IOCTL_NR(DRM_IOCTL_RM_MAP)] = {drm_rmmap_ioctl, DRM_AUTH}, + + [DRM_IOCTL_NR(DRM_IOCTL_SET_SAREA_CTX)] = {drm_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, + [DRM_IOCTL_NR(DRM_IOCTL_GET_SAREA_CTX)] = {drm_getsareactx, DRM_AUTH}, + + [DRM_IOCTL_NR(DRM_IOCTL_ADD_CTX)] = {drm_addctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, + [DRM_IOCTL_NR(DRM_IOCTL_RM_CTX)] = {drm_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, + [DRM_IOCTL_NR(DRM_IOCTL_MOD_CTX)] = {drm_modctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, + [DRM_IOCTL_NR(DRM_IOCTL_GET_CTX)] = {drm_getctx, DRM_AUTH}, + [DRM_IOCTL_NR(DRM_IOCTL_SWITCH_CTX)] = {drm_switchctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, + [DRM_IOCTL_NR(DRM_IOCTL_NEW_CTX)] = {drm_newctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, + [DRM_IOCTL_NR(DRM_IOCTL_RES_CTX)] = {drm_resctx, DRM_AUTH}, + + [DRM_IOCTL_NR(DRM_IOCTL_ADD_DRAW)] = {drm_adddraw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, + [DRM_IOCTL_NR(DRM_IOCTL_RM_DRAW)] = {drm_rmdraw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, + + [DRM_IOCTL_NR(DRM_IOCTL_LOCK)] = {drm_lock, DRM_AUTH}, + [DRM_IOCTL_NR(DRM_IOCTL_UNLOCK)] = {drm_unlock, DRM_AUTH}, + + [DRM_IOCTL_NR(DRM_IOCTL_FINISH)] = {drm_noop, DRM_AUTH}, + + [DRM_IOCTL_NR(DRM_IOCTL_ADD_BUFS)] = {drm_addbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, + [DRM_IOCTL_NR(DRM_IOCTL_MARK_BUFS)] = {drm_markbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, + [DRM_IOCTL_NR(DRM_IOCTL_INFO_BUFS)] = {drm_infobufs, DRM_AUTH}, + [DRM_IOCTL_NR(DRM_IOCTL_MAP_BUFS)] = {drm_mapbufs, DRM_AUTH}, + [DRM_IOCTL_NR(DRM_IOCTL_FREE_BUFS)] = {drm_freebufs, DRM_AUTH}, + /* The DRM_IOCTL_DMA ioctl should be defined by the driver. 
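+	   A driver wires its handler in through drm_driver::dma_ioctl;
+	   drm_ioctl() below substitutes that handler for the NULL entry
+	   here. (Editor's note, inferred from the dispatch code in this
+	   file.)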
*/ + [DRM_IOCTL_NR(DRM_IOCTL_DMA)] = {NULL, DRM_AUTH}, + + [DRM_IOCTL_NR(DRM_IOCTL_CONTROL)] = {drm_control, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, + +#if __OS_HAS_AGP + [DRM_IOCTL_NR(DRM_IOCTL_AGP_ACQUIRE)] = {drm_agp_acquire_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, + [DRM_IOCTL_NR(DRM_IOCTL_AGP_RELEASE)] = {drm_agp_release_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, + [DRM_IOCTL_NR(DRM_IOCTL_AGP_ENABLE)] = {drm_agp_enable_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, + [DRM_IOCTL_NR(DRM_IOCTL_AGP_INFO)] = {drm_agp_info_ioctl, DRM_AUTH}, + [DRM_IOCTL_NR(DRM_IOCTL_AGP_ALLOC)] = {drm_agp_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, + [DRM_IOCTL_NR(DRM_IOCTL_AGP_FREE)] = {drm_agp_free_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, + [DRM_IOCTL_NR(DRM_IOCTL_AGP_BIND)] = {drm_agp_bind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, + [DRM_IOCTL_NR(DRM_IOCTL_AGP_UNBIND)] = {drm_agp_unbind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, +#endif + + [DRM_IOCTL_NR(DRM_IOCTL_SG_ALLOC)] = {drm_sg_alloc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, + [DRM_IOCTL_NR(DRM_IOCTL_SG_FREE)] = {drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, + + [DRM_IOCTL_NR(DRM_IOCTL_WAIT_VBLANK)] = {drm_wait_vblank, 0}, +}; + +#define DRIVER_IOCTL_COUNT DRM_ARRAY_SIZE( drm_ioctls ) + +/** + * Take down the DRM device. + * + * \param dev DRM device structure. + * + * Frees every resource in \p dev. + * + * \sa drm_device + */ +int drm_lastclose(drm_device_t * dev) +{ + drm_magic_entry_t *pt, *next; + drm_map_list_t *r_list; + drm_vma_entry_t *vma, *vma_next; + int i; + + DRM_DEBUG("\n"); + + if (dev->driver->lastclose) + dev->driver->lastclose(dev); + DRM_DEBUG("driver lastclose completed\n"); + + if (dev->unique) { + drm_free(dev->unique, strlen(dev->unique) + 1, DRM_MEM_DRIVER); + dev->unique=NULL; + dev->unique_len=0; + } + + if (dev->irq_enabled) + drm_irq_uninstall(dev); + + down(&dev->struct_sem); + del_timer(&dev->timer); + + if (dev->unique) { + drm_free(dev->unique, strlen(dev->unique) + 1, DRM_MEM_DRIVER); + dev->unique = NULL; + dev->unique_len = 0; + } + + /* Clear pid list */ + for (i = 0; i < DRM_HASH_SIZE; i++) { + for (pt = dev->magiclist[i].head; pt; pt = next) { + next = pt->next; + drm_free(pt, sizeof(*pt), DRM_MEM_MAGIC); + } + dev->magiclist[i].head = dev->magiclist[i].tail = NULL; + } + + /* Clear AGP information */ + if (drm_core_has_AGP(dev) && dev->agp) { + drm_agp_mem_t *entry; + drm_agp_mem_t *nexte; + + /* Remove AGP resources, but leave dev->agp + intact until drv_cleanup is called. 
*/ + for (entry = dev->agp->memory; entry; entry = nexte) { + nexte = entry->next; + if (entry->bound) + drm_unbind_agp(entry->memory); + drm_free_agp(entry->memory, entry->pages); + drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS); + } + dev->agp->memory = NULL; + + if (dev->agp->acquired) + drm_agp_release(dev); + + dev->agp->acquired = 0; + dev->agp->enabled = 0; + } + if (drm_core_check_feature(dev, DRIVER_SG) && dev->sg) { + drm_sg_cleanup(dev->sg); + dev->sg = NULL; + } + + /* Clear vma list (only built for debugging) */ + if (dev->vmalist) { + for (vma = dev->vmalist; vma; vma = vma_next) { + vma_next = vma->next; + drm_free(vma, sizeof(*vma), DRM_MEM_VMAS); + } + dev->vmalist = NULL; + } + + if (dev->maplist) { + while (!list_empty(&dev->maplist->head)) { + struct list_head *list = dev->maplist->head.next; + r_list = list_entry(list, drm_map_list_t, head); + drm_rmmap_locked(dev, r_list->map); + } + } + + if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE) && dev->queuelist) { + for (i = 0; i < dev->queue_count; i++) { + + if (dev->queuelist[i]) { + drm_free(dev->queuelist[i], + sizeof(*dev->queuelist[0]), + DRM_MEM_QUEUES); + dev->queuelist[i] = NULL; + } + } + drm_free(dev->queuelist, + dev->queue_slots * sizeof(*dev->queuelist), + DRM_MEM_QUEUES); + dev->queuelist = NULL; + } + dev->queue_count = 0; + + if (drm_core_check_feature(dev, DRIVER_HAVE_DMA)) + drm_dma_takedown(dev); + + if (dev->lock.hw_lock) { + dev->sigdata.lock = dev->lock.hw_lock = NULL; /* SHM removed */ + dev->lock.filp = NULL; + wake_up_interruptible(&dev->lock.lock_queue); + } + up(&dev->struct_sem); + + DRM_DEBUG("lastclose completed\n"); + return 0; +} + +void __exit drm_cleanup_pci(struct pci_dev *pdev) +{ + drm_device_t *dev = pci_get_drvdata(pdev); + + pci_set_drvdata(pdev, NULL); + pci_release_regions(pdev); + if (dev) + drm_cleanup(dev); +} +EXPORT_SYMBOL(drm_cleanup_pci); + +/** + * Module initialization. Called via init_module at module load time, or via + * linux/init/main.c (this is not currently supported). + * + * \return zero on success or a negative number on failure. + * + * Initializes an array of drm_device structures, and attempts to + * initialize all available devices, using consecutive minors, registering the + * stubs and initializing the AGP device. + * + * Expands the \c DRIVER_PREINIT and \c DRIVER_POST_INIT macros before and + * after the initialization for driver customization. + */ +int drm_init(struct drm_driver *driver, + struct pci_device_id *pciidlist) +{ + struct pci_dev *pdev; + struct pci_device_id *pid; + int rc, i; + + DRM_DEBUG("\n"); + + for (i = 0; (pciidlist[i].vendor != 0) && !drm_fb_loaded; i++) { + pid = &pciidlist[i]; + + pdev = NULL; + /* pass back in pdev to account for multiple identical cards */ + while ((pdev = + pci_get_subsys(pid->vendor, pid->device, pid->subvendor, + pid->subdevice, pdev))) { + /* is there already a driver loaded, or (short circuit saves work) */ + /* does something like VesaFB have control of the memory region? 
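+	   (Editor's note: a failing pci_request_regions() is the tell
+	   that another driver, typically a framebuffer, already owns the
+	   PCI regions, so the code falls back to the stealth-mode path
+	   below.)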
*/
+			if (pci_dev_driver(pdev)
+			    || pci_request_regions(pdev, "DRM scan")) {
+				/* go into stealth mode */
+				drm_fb_loaded = 1;
+				pci_dev_put(pdev);
+				break;
+			}
+			/* no fbdev or vesadev, put things back and wait for normal probe */
+			pci_release_regions(pdev);
+		}
+	}
+
+	if (!drm_fb_loaded)
+		pci_register_driver(&driver->pci_driver);
+	else {
+		for (i = 0; pciidlist[i].vendor != 0; i++) {
+			pid = &pciidlist[i];
+
+			pdev = NULL;
+			/* pass back in pdev to account for multiple identical cards */
+			while ((pdev =
+				pci_get_subsys(pid->vendor, pid->device,
+					       pid->subvendor, pid->subdevice,
+					       pdev))) {
+				/* stealth mode requires a manual probe */
+				pci_dev_get(pdev);
+				if ((rc = drm_get_dev(pdev, &pciidlist[i], driver))) {
+					pci_dev_put(pdev);
+					return rc;
+				}
+			}
+		}
+		DRM_INFO("Used old pci detect: framebuffer loaded\n");
+	}
+	return 0;
+}
+EXPORT_SYMBOL(drm_init);
+
+/**
+ * Called via cleanup_module() at module unload time.
+ *
+ * Cleans up the DRM device, calling drm_lastclose().
+ *
+ * \sa drm_init
+ */
+static void __exit drm_cleanup(drm_device_t * dev)
+{
+
+	DRM_DEBUG("\n");
+	if (!dev) {
+		DRM_ERROR("cleanup called no dev\n");
+		return;
+	}
+
+	drm_lastclose(dev);
+
+	if (dev->maplist) {
+		drm_free(dev->maplist, sizeof(*dev->maplist), DRM_MEM_MAPS);
+		dev->maplist = NULL;
+	}
+
+	if (!drm_fb_loaded)
+		pci_disable_device(dev->pdev);
+
+	drm_ctxbitmap_cleanup(dev);
+
+	if (drm_core_has_MTRR(dev) && drm_core_has_AGP(dev) && dev->agp
+	    && dev->agp->agp_mtrr >= 0) {
+		int retval;
+		retval = mtrr_del(dev->agp->agp_mtrr,
+				  dev->agp->agp_info.aper_base,
+				  dev->agp->agp_info.aper_size * 1024 * 1024);
+		DRM_DEBUG("mtrr_del=%d\n", retval);
+	}
+
+	if (drm_core_has_AGP(dev) && dev->agp) {
+		drm_free(dev->agp, sizeof(*dev->agp), DRM_MEM_AGPLISTS);
+		dev->agp = NULL;
+	}
+	if (dev->driver->unload)
+		dev->driver->unload(dev);
+
+	drm_put_head(&dev->primary);
+	if (drm_put_dev(dev))
+		DRM_ERROR("Cannot unload module\n");
+}
+
+void __exit drm_exit(struct drm_driver *driver)
+{
+	int i;
+	drm_device_t *dev = NULL;
+	drm_head_t *head;
+
+	DRM_DEBUG("\n");
+	if (drm_fb_loaded) {
+		for (i = 0; i < cards_limit; i++) {
+			head = drm_heads[i];
+			if (!head)
+				continue;
+			if (!head->dev)
+				continue;
+			if (head->dev->driver != driver)
+				continue;
+			dev = head->dev;
+		}
+		if (dev) {
+			/* release the pci driver */
+			if (dev->pdev)
+				pci_dev_put(dev->pdev);
+			drm_cleanup(dev);
+		}
+	} else
+		pci_unregister_driver(&driver->pci_driver);
+	DRM_INFO("Module unloaded\n");
+}
+EXPORT_SYMBOL(drm_exit);
+
+/** File operations structure */
+static struct file_operations drm_stub_fops = {
+	.owner = THIS_MODULE,
+	.open = drm_stub_open
+};
+
+static int __init drm_core_init(void)
+{
+	int ret = -ENOMEM;
+
+	cards_limit = (cards_limit < DRM_MAX_MINOR + 1 ?
cards_limit : DRM_MAX_MINOR + 1);
+	drm_heads = drm_calloc(cards_limit, sizeof(*drm_heads), DRM_MEM_STUB);
+	if (!drm_heads)
+		goto err_p1;
+
+	if (register_chrdev(DRM_MAJOR, "drm", &drm_stub_fops))
+		goto err_p1;
+
+	drm_class = drm_sysfs_create(THIS_MODULE, "drm");
+	if (IS_ERR(drm_class)) {
+		printk(KERN_ERR "DRM: Error creating drm class.\n");
+		ret = PTR_ERR(drm_class);
+		goto err_p2;
+	}
+
+	drm_proc_root = create_proc_entry("dri", S_IFDIR, NULL);
+	if (!drm_proc_root) {
+		DRM_ERROR("Cannot create /proc/dri\n");
+		ret = -1;
+		goto err_p3;
+	}
+
+	drm_mem_init();
+
+	DRM_INFO("Initialized %s %d.%d.%d %s\n",
+		 CORE_NAME,
+		 CORE_MAJOR, CORE_MINOR, CORE_PATCHLEVEL, CORE_DATE);
+	return 0;
+err_p3:
+	drm_sysfs_destroy(drm_class);
+err_p2:
+	unregister_chrdev(DRM_MAJOR, "drm");
+	drm_free(drm_heads, sizeof(*drm_heads) * cards_limit, DRM_MEM_STUB);
+err_p1:
+	return ret;
+}
+
+static void __exit drm_core_exit(void)
+{
+	remove_proc_entry("dri", NULL);
+	drm_sysfs_destroy(drm_class);
+
+	unregister_chrdev(DRM_MAJOR, "drm");
+
+	drm_free(drm_heads, sizeof(*drm_heads) * cards_limit, DRM_MEM_STUB);
+}
+
+module_init(drm_core_init);
+module_exit(drm_core_exit);
+
+/**
+ * Get version information
+ *
+ * \param inode device inode.
+ * \param filp file pointer.
+ * \param cmd command.
+ * \param arg user argument, pointing to a drm_version structure.
+ * \return zero on success or negative number on failure.
+ *
+ * Fills in the version information in \p arg.
+ */
+static int drm_version(struct inode *inode, struct file *filp,
+		       unsigned int cmd, unsigned long arg)
+{
+	drm_file_t *priv = filp->private_data;
+	drm_device_t *dev = priv->head->dev;
+	drm_version_t __user *argp = (void __user *)arg;
+	drm_version_t version;
+	int len;
+
+	if (copy_from_user(&version, argp, sizeof(version)))
+		return -EFAULT;
+
+	version.version_major = dev->driver->major;
+	version.version_minor = dev->driver->minor;
+	version.version_patchlevel = dev->driver->patchlevel;
+	DRM_COPY(version.name, dev->driver->name);
+	DRM_COPY(version.date, dev->driver->date);
+	DRM_COPY(version.desc, dev->driver->desc);
+
+	if (copy_to_user(argp, &version, sizeof(version)))
+		return -EFAULT;
+	return 0;
+}
+
+/**
+ * Called whenever a process performs an ioctl on /dev/drm.
+ *
+ * \param inode device inode.
+ * \param filp file pointer.
+ * \param cmd command.
+ * \param arg user argument.
+ * \return zero on success or negative number on failure.
+ *
+ * Looks up the ioctl function in the ::ioctls table, checking for root
+ * privileges if so required, and dispatches to the respective function.
+ */
+int drm_ioctl(struct inode *inode, struct file *filp,
+	      unsigned int cmd, unsigned long arg)
+{
+	drm_file_t *priv = filp->private_data;
+	drm_device_t *dev = priv->head->dev;
+	drm_ioctl_desc_t *ioctl;
+	drm_ioctl_t *func;
+	unsigned int nr = DRM_IOCTL_NR(cmd);
+	int retcode = -EINVAL;
+
+	atomic_inc(&dev->ioctl_count);
+	atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
+	++priv->ioctl_count;
+
+	DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
+		  current->pid, cmd, nr, (long)old_encode_dev(priv->head->device),
+		  priv->authenticated);
+
+	if (nr < DRIVER_IOCTL_COUNT)
+		ioctl = &drm_ioctls[nr];
+	else if ((nr >= DRM_COMMAND_BASE)
+		 && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls))
+		ioctl = &dev->driver->ioctls[nr - DRM_COMMAND_BASE];
+	else
+		goto err_i1;
+
+	func = ioctl->func;
+	if ((nr == DRM_IOCTL_NR(DRM_IOCTL_DMA)) && dev->driver->dma_ioctl)	/* Local override?
*/ + func = dev->driver->dma_ioctl; + + if (!func) { + DRM_DEBUG("no function\n"); + retcode = -EINVAL; + } else if (((ioctl->flags & DRM_ROOT_ONLY) && !capable(CAP_SYS_ADMIN)) || + ((ioctl->flags & DRM_AUTH) && !priv->authenticated) || + ((ioctl->flags & DRM_MASTER) && !priv->master)) { + retcode = -EACCES; + } else { + retcode = func(inode, filp, cmd, arg); + } +err_i1: + atomic_dec(&dev->ioctl_count); + if (retcode) + DRM_DEBUG("ret = %x\n", retcode); + return retcode; +} +EXPORT_SYMBOL(drm_ioctl); diff --git a/nx-X11/extras/drm/linux-core/drm_fops.c b/nx-X11/extras/drm/linux-core/drm_fops.c new file mode 100644 index 000000000..c79372d30 --- /dev/null +++ b/nx-X11/extras/drm/linux-core/drm_fops.c @@ -0,0 +1,497 @@ +/** + * \file drm_fops.c + * File operations for DRM + * + * \author Rickard E. (Rik) Faith <faith@valinux.com> + * \author Daryll Strauss <daryll@valinux.com> + * \author Gareth Hughes <gareth@valinux.com> + */ + +/* + * Created: Mon Jan 4 08:58:31 1999 by faith@valinux.com + * + * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. + * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ */
+
+#include <linux/poll.h>
+
+#include "drmP.h"
+#include "drm_sarea.h"
+
+static int drm_open_helper(struct inode *inode, struct file *filp, drm_device_t * dev);
+
+static int drm_setup(drm_device_t * dev)
+{
+	drm_local_map_t *map;
+	int i;
+
+	if (dev->driver->firstopen)
+		dev->driver->firstopen(dev);
+
+	/* prebuild the SAREA */
+	i = drm_addmap(dev, 0, SAREA_MAX, _DRM_SHM, _DRM_CONTAINS_LOCK, &map);
+	if (i != 0)
+		return i;
+
+	atomic_set(&dev->ioctl_count, 0);
+	atomic_set(&dev->vma_count, 0);
+	dev->buf_use = 0;
+	atomic_set(&dev->buf_alloc, 0);
+
+	if (drm_core_check_feature(dev, DRIVER_HAVE_DMA)) {
+		i = drm_dma_setup(dev);
+		if (i < 0)
+			return i;
+	}
+
+	for (i = 0; i < DRM_ARRAY_SIZE(dev->counts); i++)
+		atomic_set(&dev->counts[i], 0);
+
+	for (i = 0; i < DRM_HASH_SIZE; i++) {
+		dev->magiclist[i].head = NULL;
+		dev->magiclist[i].tail = NULL;
+	}
+
+	dev->ctxlist = drm_alloc(sizeof(*dev->ctxlist), DRM_MEM_CTXLIST);
+	if (dev->ctxlist == NULL)
+		return -ENOMEM;
+	memset(dev->ctxlist, 0, sizeof(*dev->ctxlist));
+	INIT_LIST_HEAD(&dev->ctxlist->head);
+
+	dev->vmalist = NULL;
+	dev->sigdata.lock = NULL;
+	init_waitqueue_head(&dev->lock.lock_queue);
+	dev->queue_count = 0;
+	dev->queue_reserved = 0;
+	dev->queue_slots = 0;
+	dev->queuelist = NULL;
+	dev->irq_enabled = 0;
+	dev->context_flag = 0;
+	dev->interrupt_flag = 0;
+	dev->dma_flag = 0;
+	dev->last_context = 0;
+	dev->last_switch = 0;
+	dev->last_checked = 0;
+	init_waitqueue_head(&dev->context_wait);
+	dev->if_version = 0;
+
+	dev->ctx_start = 0;
+	dev->lck_start = 0;
+
+	dev->buf_async = NULL;
+	init_waitqueue_head(&dev->buf_readers);
+	init_waitqueue_head(&dev->buf_writers);
+
+	DRM_DEBUG("\n");
+
+	/*
+	 * The kernel's context could be created here, but is now created
+	 * in drm_dma_enqueue. This is more resource-efficient for
+	 * hardware that does not do DMA, but may mean that
+	 * drm_select_queue fails between the time the interrupt is
+	 * initialized and the time the queues are initialized.
+	 */
+
+	return 0;
+}
+
+/**
+ * Open file.
+ *
+ * \param inode device inode
+ * \param filp file pointer.
+ * \return zero on success or a negative number on failure.
+ *
+ * Searches the DRM device with the same minor number, calls open_helper(), and
+ * increments the device open count. If the open count was previously zero,
+ * i.e., this is the first time the device is opened, then calls setup().
+ */
+int drm_open(struct inode *inode, struct file *filp)
+{
+	drm_device_t *dev = NULL;
+	int minor = iminor(inode);
+	int retcode = 0;
+
+	if (!((minor >= 0) && (minor < cards_limit)))
+		return -ENODEV;
+
+	if (!drm_heads[minor])
+		return -ENODEV;
+
+	if (!(dev = drm_heads[minor]->dev))
+		return -ENODEV;
+
+	retcode = drm_open_helper(inode, filp, dev);
+	if (!retcode) {
+		atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
+		spin_lock(&dev->count_lock);
+		if (!dev->open_count++) {
+			spin_unlock(&dev->count_lock);
+			return drm_setup(dev);
+		}
+		spin_unlock(&dev->count_lock);
+	}
+
+	return retcode;
+}
+EXPORT_SYMBOL(drm_open);
+
+/**
+ * File \c open operation.
+ *
+ * \param inode device inode.
+ * \param filp file pointer.
+ *
+ * Puts the dev->fops corresponding to the device minor number into
+ * \p filp, calls the \c open method, and restores the file operations.
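+ *
+ * (Editor's note: this swap is what lets a single character-device
+ * major serve every DRM driver; the stub open() looks up the per-minor
+ * driver and hands the struct file over to that driver's own
+ * file_operations.)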
+ */
+int drm_stub_open(struct inode *inode, struct file *filp)
+{
+	drm_device_t *dev = NULL;
+	int minor = iminor(inode);
+	int err = -ENODEV;
+	struct file_operations *old_fops;
+
+	DRM_DEBUG("\n");
+
+	if (!((minor >= 0) && (minor < cards_limit)))
+		return -ENODEV;
+
+	if (!drm_heads[minor])
+		return -ENODEV;
+
+	if (!(dev = drm_heads[minor]->dev))
+		return -ENODEV;
+
+	old_fops = filp->f_op;
+	filp->f_op = fops_get(&dev->driver->fops);
+	if (filp->f_op->open && (err = filp->f_op->open(inode, filp))) {
+		fops_put(filp->f_op);
+		filp->f_op = fops_get(old_fops);
+	}
+	fops_put(old_fops);
+
+	return err;
+}
+
+/**
+ * Check whether DRI will run on this CPU.
+ *
+ * \return non-zero if the DRI will run on this CPU, or zero otherwise.
+ */
+static int drm_cpu_valid(void)
+{
+#if defined(__i386__)
+	if (boot_cpu_data.x86 == 3)
+		return 0;	/* No cmpxchg on a 386 */
+#endif
+#if defined(__sparc__) && !defined(__sparc_v9__)
+	return 0;		/* No cmpxchg before v9 sparc. */
+#endif
+	return 1;
+}
+
+/**
+ * Called whenever a process opens /dev/drm.
+ *
+ * \param inode device inode.
+ * \param filp file pointer.
+ * \param dev device.
+ * \return zero on success or a negative number on failure.
+ *
+ * Creates and initializes a drm_file structure for the file private data in \p
+ * filp and adds it to the doubly-linked list in \p dev.
+ */
+static int drm_open_helper(struct inode *inode, struct file *filp, drm_device_t * dev)
+{
+	int minor = iminor(inode);
+	drm_file_t *priv;
+	int ret;
+
+	if (filp->f_flags & O_EXCL)
+		return -EBUSY;	/* No exclusive opens */
+	if (!drm_cpu_valid())
+		return -EINVAL;
+
+	DRM_DEBUG("pid = %d, minor = %d\n", current->pid, minor);
+
+	priv = drm_alloc(sizeof(*priv), DRM_MEM_FILES);
+	if (!priv)
+		return -ENOMEM;
+
+	memset(priv, 0, sizeof(*priv));
+	filp->private_data = priv;
+	priv->uid = current->euid;
+	priv->pid = current->pid;
+	priv->minor = minor;
+	priv->head = drm_heads[minor];
+	priv->ioctl_count = 0;
+	/* for compatibility root is always authenticated */
+	priv->authenticated = capable(CAP_SYS_ADMIN);
+	priv->lock_count = 0;
+
+	if (dev->driver->open) {
+		ret = dev->driver->open(dev, priv);
+		if (ret < 0)
+			goto out_free;
+	}
+
+	down(&dev->struct_sem);
+	if (!dev->file_last) {
+		priv->next = NULL;
+		priv->prev = NULL;
+		dev->file_first = priv;
+		dev->file_last = priv;
+		/* first opener automatically becomes master */
+		priv->master = 1;
+	} else {
+		priv->next = NULL;
+		priv->prev = dev->file_last;
+		dev->file_last->next = priv;
+		dev->file_last = priv;
+	}
+	up(&dev->struct_sem);
+
+#ifdef __alpha__
+	/*
+	 * Default the hose
+	 */
+	if (!dev->hose) {
+		struct pci_dev *pci_dev;
+		pci_dev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, NULL);
+		if (pci_dev) {
+			dev->hose = pci_dev->sysdata;
+			pci_dev_put(pci_dev);
+		}
+		if (!dev->hose) {
+			struct pci_bus *b = pci_bus_b(pci_root_buses.next);
+			if (b)
+				dev->hose = b->sysdata;
+		}
+	}
+#endif
+
+	return 0;
+      out_free:
+	drm_free(priv, sizeof(*priv), DRM_MEM_FILES);
+	filp->private_data = NULL;
+	return ret;
+}
+
+/** No-op. */
+int drm_fasync(int fd, struct file *filp, int on)
+{
+	drm_file_t *priv = filp->private_data;
+	drm_device_t *dev = priv->head->dev;
+	int retcode;
+
+	DRM_DEBUG("fd = %d, device = 0x%lx\n", fd,
+		  (long)old_encode_dev(priv->head->device));
+	retcode = fasync_helper(fd, filp, on, &dev->buf_async);
+	if (retcode < 0)
+		return retcode;
+	return 0;
+}
+EXPORT_SYMBOL(drm_fasync);
+
+/**
+ * Release file.
+ *
+ * \param inode device inode
+ * \param filp file pointer.
+ * \return zero on success or a negative number on failure. + * + * If the hardware lock is held then free it, and take it again for the kernel + * context since it's necessary to reclaim buffers. Unlink the file private + * data from its list and free it. Decreases the open count and if it reaches + * zero calls takedown(). + */ +int drm_release(struct inode *inode, struct file *filp) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev; + int retcode = 0; + + lock_kernel(); + dev = priv->head->dev; + + DRM_DEBUG("open_count = %d\n", dev->open_count); + + if (dev->driver->preclose) + dev->driver->preclose(dev, filp); + + /* ======================================================== + * Begin inline drm_release + */ + + DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n", + current->pid, (long)old_encode_dev(priv->head->device), + dev->open_count); + + if (priv->lock_count && dev->lock.hw_lock && + _DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) && + dev->lock.filp == filp) { + DRM_DEBUG("File %p released, freeing lock for context %d\n", + filp, _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock)); + + if (dev->driver->reclaim_buffers_locked) + dev->driver->reclaim_buffers_locked(dev, filp); + + drm_lock_free(dev, &dev->lock.hw_lock->lock, + _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock)); + + /* FIXME: may require heavy-handed reset of + hardware at this point, possibly + processed via a callback to the X + server. */ + } else if (dev->driver->reclaim_buffers_locked && priv->lock_count + && dev->lock.hw_lock) { + /* The lock is required to reclaim buffers */ + DECLARE_WAITQUEUE(entry, current); + + add_wait_queue(&dev->lock.lock_queue, &entry); + for (;;) { + current->state = TASK_INTERRUPTIBLE; + if (!dev->lock.hw_lock) { + /* Device has been unregistered */ + retcode = -EINTR; + break; + } + if (drm_lock_take(&dev->lock.hw_lock->lock, + DRM_KERNEL_CONTEXT)) { + dev->lock.filp = filp; + dev->lock.lock_time = jiffies; + atomic_inc(&dev->counts[_DRM_STAT_LOCKS]); + break; /* Got lock */ + } + /* Contention */ + schedule(); + if (signal_pending(current)) { + retcode = -ERESTARTSYS; + break; + } + } + current->state = TASK_RUNNING; + remove_wait_queue(&dev->lock.lock_queue, &entry); + if (!retcode) { + dev->driver->reclaim_buffers_locked(dev, filp); + drm_lock_free(dev, &dev->lock.hw_lock->lock, + DRM_KERNEL_CONTEXT); + } + } + + if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) && + !dev->driver->reclaim_buffers_locked) { + dev->driver->reclaim_buffers(dev, filp); + } + + drm_fasync(-1, filp, 0); + + down(&dev->ctxlist_sem); + if (dev->ctxlist && (!list_empty(&dev->ctxlist->head))) { + drm_ctx_list_t *pos, *n; + + list_for_each_entry_safe(pos, n, &dev->ctxlist->head, head) { + if (pos->tag == priv && + pos->handle != DRM_KERNEL_CONTEXT) { + if (dev->driver->context_dtor) + dev->driver->context_dtor(dev, + pos->handle); + + drm_ctxbitmap_free(dev, pos->handle); + + list_del(&pos->head); + drm_free(pos, sizeof(*pos), DRM_MEM_CTXLIST); + --dev->ctx_count; + } + } + } + up(&dev->ctxlist_sem); + + down(&dev->struct_sem); + if (priv->remove_auth_on_close == 1) { + drm_file_t *temp = dev->file_first; + while (temp) { + temp->authenticated = 0; + temp = temp->next; + } + } + if (priv->prev) { + priv->prev->next = priv->next; + } else { + dev->file_first = priv->next; + } + if (priv->next) { + priv->next->prev = priv->prev; + } else { + dev->file_last = priv->prev; + } + up(&dev->struct_sem); + + if (dev->driver->postclose) + dev->driver->postclose(dev, priv); + drm_free(priv, sizeof(*priv), 
DRM_MEM_FILES); + + /* ======================================================== + * End inline drm_release + */ + + atomic_inc(&dev->counts[_DRM_STAT_CLOSES]); + spin_lock(&dev->count_lock); + if (!--dev->open_count) { + if (atomic_read(&dev->ioctl_count) || dev->blocked) { + DRM_ERROR("Device busy: %d %d\n", + atomic_read(&dev->ioctl_count), dev->blocked); + spin_unlock(&dev->count_lock); + unlock_kernel(); + return -EBUSY; + } + spin_unlock(&dev->count_lock); + unlock_kernel(); + return drm_lastclose(dev); + } + spin_unlock(&dev->count_lock); + + unlock_kernel(); + + return retcode; +} +EXPORT_SYMBOL(drm_release); + +/** No-op. */ +/* This is to deal with older X servers that believe 0 means data is + * available which is not the correct return for a poll function. + * This cannot be fixed until the Xserver is fixed. Xserver will need + * to set a newer interface version to avoid breaking older Xservers. + * Without fixing the Xserver you get: "WaitForSomething(): select: errno=22" + * http://freedesktop.org/bugzilla/show_bug.cgi?id=1505 if you try + * to return the correct response. + */ +unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait) +{ + /* return (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM); */ + return 0; +} +EXPORT_SYMBOL(drm_poll); + diff --git a/nx-X11/extras/drm/linux-core/drm_ioc32.c b/nx-X11/extras/drm/linux-core/drm_ioc32.c new file mode 100644 index 000000000..8087a9636 --- /dev/null +++ b/nx-X11/extras/drm/linux-core/drm_ioc32.c @@ -0,0 +1,1069 @@ +/** + * \file drm_ioc32.c + * + * 32-bit ioctl compatibility routines for the DRM. + * + * \author Paul Mackerras <paulus@samba.org> + * + * Copyright (C) Paul Mackerras 2005. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ */ +#include <linux/compat.h> +#include <linux/ioctl32.h> + +#include "drmP.h" +#include "drm_core.h" + +#define DRM_IOCTL_VERSION32 DRM_IOWR(0x00, drm_version32_t) +#define DRM_IOCTL_GET_UNIQUE32 DRM_IOWR(0x01, drm_unique32_t) +#define DRM_IOCTL_GET_MAP32 DRM_IOWR(0x04, drm_map32_t) +#define DRM_IOCTL_GET_CLIENT32 DRM_IOWR(0x05, drm_client32_t) +#define DRM_IOCTL_GET_STATS32 DRM_IOR( 0x06, drm_stats32_t) + +#define DRM_IOCTL_SET_UNIQUE32 DRM_IOW( 0x10, drm_unique32_t) +#define DRM_IOCTL_ADD_MAP32 DRM_IOWR(0x15, drm_map32_t) +#define DRM_IOCTL_ADD_BUFS32 DRM_IOWR(0x16, drm_buf_desc32_t) +#define DRM_IOCTL_MARK_BUFS32 DRM_IOW( 0x17, drm_buf_desc32_t) +#define DRM_IOCTL_INFO_BUFS32 DRM_IOWR(0x18, drm_buf_info32_t) +#define DRM_IOCTL_MAP_BUFS32 DRM_IOWR(0x19, drm_buf_map32_t) +#define DRM_IOCTL_FREE_BUFS32 DRM_IOW( 0x1a, drm_buf_free32_t) + +#define DRM_IOCTL_RM_MAP32 DRM_IOW( 0x1b, drm_map32_t) + +#define DRM_IOCTL_SET_SAREA_CTX32 DRM_IOW( 0x1c, drm_ctx_priv_map32_t) +#define DRM_IOCTL_GET_SAREA_CTX32 DRM_IOWR(0x1d, drm_ctx_priv_map32_t) + +#define DRM_IOCTL_RES_CTX32 DRM_IOWR(0x26, drm_ctx_res32_t) +#define DRM_IOCTL_DMA32 DRM_IOWR(0x29, drm_dma32_t) + +#define DRM_IOCTL_AGP_ENABLE32 DRM_IOW( 0x32, drm_agp_mode32_t) +#define DRM_IOCTL_AGP_INFO32 DRM_IOR( 0x33, drm_agp_info32_t) +#define DRM_IOCTL_AGP_ALLOC32 DRM_IOWR(0x34, drm_agp_buffer32_t) +#define DRM_IOCTL_AGP_FREE32 DRM_IOW( 0x35, drm_agp_buffer32_t) +#define DRM_IOCTL_AGP_BIND32 DRM_IOW( 0x36, drm_agp_binding32_t) +#define DRM_IOCTL_AGP_UNBIND32 DRM_IOW( 0x37, drm_agp_binding32_t) + +#define DRM_IOCTL_SG_ALLOC32 DRM_IOW( 0x38, drm_scatter_gather32_t) +#define DRM_IOCTL_SG_FREE32 DRM_IOW( 0x39, drm_scatter_gather32_t) + +#define DRM_IOCTL_WAIT_VBLANK32 DRM_IOWR(0x3a, drm_wait_vblank32_t) + +typedef struct drm_version_32 { + int version_major; /**< Major version */ + int version_minor; /**< Minor version */ + int version_patchlevel;/**< Patch level */ + u32 name_len; /**< Length of name buffer */ + u32 name; /**< Name of driver */ + u32 date_len; /**< Length of date buffer */ + u32 date; /**< User-space buffer to hold date */ + u32 desc_len; /**< Length of desc buffer */ + u32 desc; /**< User-space buffer to hold desc */ +} drm_version32_t; + +static int compat_drm_version(struct file *file, unsigned int cmd, + unsigned long arg) +{ + drm_version32_t v32; + drm_version_t __user *version; + int err; + + if (copy_from_user(&v32, (void __user *) arg, sizeof(v32))) + return -EFAULT; + + version = compat_alloc_user_space(sizeof(*version)); + if (!access_ok(VERIFY_WRITE, version, sizeof(*version))) + return -EFAULT; + if (__put_user(v32.name_len, &version->name_len) + || __put_user((void __user *)(unsigned long)v32.name, + &version->name) + || __put_user(v32.date_len, &version->date_len) + || __put_user((void __user *)(unsigned long)v32.date, + &version->date) + || __put_user(v32.desc_len, &version->desc_len) + || __put_user((void __user *)(unsigned long)v32.desc, + &version->desc)) + return -EFAULT; + + err = drm_ioctl(file->f_dentry->d_inode, file, + DRM_IOCTL_VERSION, (unsigned long) version); + if (err) + return err; + + if (__get_user(v32.version_major, &version->version_major) + || __get_user(v32.version_minor, &version->version_minor) + || __get_user(v32.version_patchlevel, &version->version_patchlevel) + || __get_user(v32.name_len, &version->name_len) + || __get_user(v32.date_len, &version->date_len) + || __get_user(v32.desc_len, &version->desc_len)) + return -EFAULT; + + if (copy_to_user((void __user *) arg, &v32, sizeof(v32))) + 
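/* could not copy the updated 32-bit struct back to the caller */ +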
return -EFAULT; + return 0; +} + +typedef struct drm_unique32 { + u32 unique_len; /**< Length of unique */ + u32 unique; /**< Unique name for driver instantiation */ +} drm_unique32_t; + +static int compat_drm_getunique(struct file *file, unsigned int cmd, + unsigned long arg) +{ + drm_unique32_t uq32; + drm_unique_t __user *u; + int err; + + if (copy_from_user(&uq32, (void __user *) arg, sizeof(uq32))) + return -EFAULT; + + u = compat_alloc_user_space(sizeof(*u)); + if (!access_ok(VERIFY_WRITE, u, sizeof(*u))) + return -EFAULT; + if (__put_user(uq32.unique_len, &u->unique_len) + || __put_user((void __user *)(unsigned long) uq32.unique, + &u->unique)) + return -EFAULT; + + err = drm_ioctl(file->f_dentry->d_inode, file, + DRM_IOCTL_GET_UNIQUE, (unsigned long) u); + if (err) + return err; + + if (__get_user(uq32.unique_len, &u->unique_len)) + return -EFAULT; + if (copy_to_user((void __user *) arg, &uq32, sizeof(uq32))) + return -EFAULT; + return 0; +} + +static int compat_drm_setunique(struct file *file, unsigned int cmd, + unsigned long arg) +{ + drm_unique32_t uq32; + drm_unique_t __user *u; + + if (copy_from_user(&uq32, (void __user *) arg, sizeof(uq32))) + return -EFAULT; + + u = compat_alloc_user_space(sizeof(*u)); + if (!access_ok(VERIFY_WRITE, u, sizeof(*u))) + return -EFAULT; + if (__put_user(uq32.unique_len, &u->unique_len) + || __put_user((void __user *)(unsigned long) uq32.unique, + &u->unique)) + return -EFAULT; + + return drm_ioctl(file->f_dentry->d_inode, file, + DRM_IOCTL_SET_UNIQUE, (unsigned long) u); +} + +typedef struct drm_map32 { + u32 offset; /**< Requested physical address (0 for SAREA)*/ + u32 size; /**< Requested physical size (bytes) */ + drm_map_type_t type; /**< Type of memory to map */ + drm_map_flags_t flags; /**< Flags */ + u32 handle; /**< User-space: "Handle" to pass to mmap() */ + int mtrr; /**< MTRR slot used */ +} drm_map32_t; + +static int compat_drm_getmap(struct file *file, unsigned int cmd, + unsigned long arg) +{ + drm_map32_t __user *argp = (void __user *)arg; + drm_map32_t m32; + drm_map_t __user *map; + int idx, err; + void *handle; + + if (get_user(idx, &argp->offset)) + return -EFAULT; + + map = compat_alloc_user_space(sizeof(*map)); + if (!access_ok(VERIFY_WRITE, map, sizeof(*map))) + return -EFAULT; + if (__put_user(idx, &map->offset)) + return -EFAULT; + + err = drm_ioctl(file->f_dentry->d_inode, file, + DRM_IOCTL_GET_MAP, (unsigned long) map); + if (err) + return err; + + if (__get_user(m32.offset, &map->offset) + || __get_user(m32.size, &map->size) + || __get_user(m32.type, &map->type) + || __get_user(m32.flags, &map->flags) + || __get_user(handle, &map->handle) + || __get_user(m32.mtrr, &map->mtrr)) + return -EFAULT; + + m32.handle = (unsigned long) handle; + if (copy_to_user(argp, &m32, sizeof(m32))) + return -EFAULT; + return 0; + +} + +static int compat_drm_addmap(struct file *file, unsigned int cmd, + unsigned long arg) +{ + drm_map32_t __user *argp = (void __user *)arg; + drm_map32_t m32; + drm_map_t __user *map; + int err; + void *handle; + + if (copy_from_user(&m32, argp, sizeof(m32))) + return -EFAULT; + + map = compat_alloc_user_space(sizeof(*map)); + if (!access_ok(VERIFY_WRITE, map, sizeof(*map))) + return -EFAULT; + if (__put_user(m32.offset, &map->offset) + || __put_user(m32.size, &map->size) + || __put_user(m32.type, &map->type) + || __put_user(m32.flags, &map->flags)) + return -EFAULT; + + err = drm_ioctl(file->f_dentry->d_inode, file, + DRM_IOCTL_ADD_MAP, (unsigned long) map); + if (err) + return err; + + if 
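/* read back the fields the ioctl filled in; the handle is checked for truncation just below */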
(__get_user(m32.offset, &map->offset) + || __get_user(m32.mtrr, &map->mtrr) + || __get_user(handle, &map->handle)) + return -EFAULT; + + m32.handle = (unsigned long) handle; + if (m32.handle != (unsigned long) handle && printk_ratelimit()) + printk(KERN_ERR "compat_drm_addmap truncated handle" + " %p for type %d offset %x\n", + handle, m32.type, m32.offset); + + if (copy_to_user(argp, &m32, sizeof(m32))) + return -EFAULT; + + return 0; +} + +static int compat_drm_rmmap(struct file *file, unsigned int cmd, + unsigned long arg) +{ + drm_map32_t __user *argp = (void __user *)arg; + drm_map_t __user *map; + u32 handle; + + if (get_user(handle, &argp->handle)) + return -EFAULT; + + map = compat_alloc_user_space(sizeof(*map)); + if (!access_ok(VERIFY_WRITE, map, sizeof(*map))) + return -EFAULT; + if (__put_user((void *)(unsigned long) handle, &map->handle)) + return -EFAULT; + + return drm_ioctl(file->f_dentry->d_inode, file, + DRM_IOCTL_RM_MAP, (unsigned long) map); +} + +typedef struct drm_client32 { + int idx; /**< Which client desired? */ + int auth; /**< Is client authenticated? */ + u32 pid; /**< Process ID */ + u32 uid; /**< User ID */ + u32 magic; /**< Magic */ + u32 iocs; /**< Ioctl count */ +} drm_client32_t; + +static int compat_drm_getclient(struct file *file, unsigned int cmd, + unsigned long arg) +{ + drm_client32_t c32; + drm_client32_t __user *argp = (void __user *)arg; + drm_client_t __user *client; + int idx, err; + + if (get_user(idx, &argp->idx)) + return -EFAULT; + + client = compat_alloc_user_space(sizeof(*client)); + if (!access_ok(VERIFY_WRITE, client, sizeof(*client))) + return -EFAULT; + if (__put_user(idx, &client->idx)) + return -EFAULT; + + err = drm_ioctl(file->f_dentry->d_inode, file, + DRM_IOCTL_GET_CLIENT, (unsigned long) client); + if (err) + return err; + + if (__get_user(c32.auth, &client->auth) + || __get_user(c32.pid, &client->pid) + || __get_user(c32.uid, &client->uid) + || __get_user(c32.magic, &client->magic) + || __get_user(c32.iocs, &client->iocs)) + return -EFAULT; + + if (copy_to_user(argp, &c32, sizeof(c32))) + return -EFAULT; + return 0; +} + +typedef struct drm_stats32 { + u32 count; + struct { + u32 value; + drm_stat_type_t type; + } data[15]; +} drm_stats32_t; + +static int compat_drm_getstats(struct file *file, unsigned int cmd, + unsigned long arg) +{ + drm_stats32_t s32; + drm_stats32_t __user *argp = (void __user *)arg; + drm_stats_t __user *stats; + int i, err; + + stats = compat_alloc_user_space(sizeof(*stats)); + if (!access_ok(VERIFY_WRITE, stats, sizeof(*stats))) + return -EFAULT; + + err = drm_ioctl(file->f_dentry->d_inode, file, + DRM_IOCTL_GET_STATS, (unsigned long) stats); + if (err) + return err; + + if (__get_user(s32.count, &stats->count)) + return -EFAULT; + for (i = 0; i < 15; ++i) + if (__get_user(s32.data[i].value, &stats->data[i].value) + || __get_user(s32.data[i].type, &stats->data[i].type)) + return -EFAULT; + + if (copy_to_user(argp, &s32, sizeof(s32))) + return -EFAULT; + return 0; +} + +typedef struct drm_buf_desc32 { + int count; /**< Number of buffers of this size */ + int size; /**< Size in bytes */ + int low_mark; /**< Low water mark */ + int high_mark; /**< High water mark */ + int flags; + u32 agp_start; /**< Start address in the AGP aperture */ +} drm_buf_desc32_t; + +static int compat_drm_addbufs(struct file *file, unsigned int cmd, + unsigned long arg) +{ + drm_buf_desc32_t __user *argp = (void __user *)arg; + drm_buf_desc_t __user *buf; + int err; + unsigned long agp_start; + + buf = 
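/* a native drm_buf_desc_t staged in user-addressable space for the real ioctl handler */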
compat_alloc_user_space(sizeof(*buf)); + if (!access_ok(VERIFY_WRITE, buf, sizeof(*buf)) + || !access_ok(VERIFY_WRITE, argp, sizeof(*argp))) + return -EFAULT; + + if (__copy_in_user(buf, argp, offsetof(drm_buf_desc32_t, agp_start)) + || __get_user(agp_start, &argp->agp_start) + || __put_user(agp_start, &buf->agp_start)) + return -EFAULT; + + err = drm_ioctl(file->f_dentry->d_inode, file, + DRM_IOCTL_ADD_BUFS, (unsigned long) buf); + if (err) + return err; + + if (__copy_in_user(argp, buf, offsetof(drm_buf_desc32_t, agp_start)) + || __get_user(agp_start, &buf->agp_start) + || __put_user(agp_start, &argp->agp_start)) + return -EFAULT; + + return 0; +} + +static int compat_drm_markbufs(struct file *file, unsigned int cmd, + unsigned long arg) +{ + drm_buf_desc32_t b32; + drm_buf_desc32_t __user *argp = (void __user *)arg; + drm_buf_desc_t __user *buf; + + if (copy_from_user(&b32, argp, sizeof(b32))) + return -EFAULT; + + buf = compat_alloc_user_space(sizeof(*buf)); + if (!access_ok(VERIFY_WRITE, buf, sizeof(*buf))) + return -EFAULT; + + if (__put_user(b32.size, &buf->size) + || __put_user(b32.low_mark, &buf->low_mark) + || __put_user(b32.high_mark, &buf->high_mark)) + return -EFAULT; + + return drm_ioctl(file->f_dentry->d_inode, file, + DRM_IOCTL_MARK_BUFS, (unsigned long) buf); +} + +typedef struct drm_buf_info32 { + int count; /**< Entries in list */ + u32 list; +} drm_buf_info32_t; + +static int compat_drm_infobufs(struct file *file, unsigned int cmd, + unsigned long arg) +{ + drm_buf_info32_t req32; + drm_buf_info32_t __user *argp = (void __user *)arg; + drm_buf_desc32_t __user *to; + drm_buf_info_t __user *request; + drm_buf_desc_t __user *list; + size_t nbytes; + int i, err; + int count, actual; + + if (copy_from_user(&req32, argp, sizeof(req32))) + return -EFAULT; + + count = req32.count; + to = (drm_buf_desc32_t __user *)(unsigned long) req32.list; + if (count < 0) + count = 0; + if (count > 0 + && !access_ok(VERIFY_WRITE, to, count * sizeof(drm_buf_desc32_t))) + return -EFAULT; + + nbytes = sizeof(*request) + count * sizeof(drm_buf_desc_t); + request = compat_alloc_user_space(nbytes); + if (!access_ok(VERIFY_WRITE, request, nbytes)) + return -EFAULT; + list = (drm_buf_desc_t *) (request + 1); + + if (__put_user(count, &request->count) + || __put_user(list, &request->list)) + return -EFAULT; + + err = drm_ioctl(file->f_dentry->d_inode, file, + DRM_IOCTL_INFO_BUFS, (unsigned long) request); + if (err) + return err; + + if (__get_user(actual, &request->count)) + return -EFAULT; + if (count >= actual) + for (i = 0; i < actual; ++i) + if (__copy_in_user(&to[i], &list[i], + offsetof(drm_buf_desc_t, flags))) + return -EFAULT; + + if (__put_user(actual, &argp->count)) + return -EFAULT; + + return 0; +} + +typedef struct drm_buf_pub32 { + int idx; /**< Index into the master buffer list */ + int total; /**< Buffer size */ + int used; /**< Amount of buffer in use (for DMA) */ + u32 address; /**< Address of buffer */ +} drm_buf_pub32_t; + +typedef struct drm_buf_map32 { + int count; /**< Length of the buffer list */ + u32 virtual; /**< Mmap'd area in user-virtual */ + u32 list; /**< Buffer information */ +} drm_buf_map32_t; + +static int compat_drm_mapbufs(struct file *file, unsigned int cmd, + unsigned long arg) +{ + drm_buf_map32_t __user *argp = (void __user *)arg; + drm_buf_map32_t req32; + drm_buf_pub32_t __user *list32; + drm_buf_map_t __user *request; + drm_buf_pub_t __user *list; + int i, err; + int count, actual; + size_t nbytes; + void __user *addr; + + if (copy_from_user(&req32, argp, 
sizeof(req32))) + return -EFAULT; + count = req32.count; + list32 = (void __user *)(unsigned long)req32.list; + + if (count < 0) + return -EINVAL; + nbytes = sizeof(*request) + count * sizeof(drm_buf_pub_t); + request = compat_alloc_user_space(nbytes); + if (!access_ok(VERIFY_WRITE, request, nbytes)) + return -EFAULT; + list = (drm_buf_pub_t *) (request + 1); + + if (__put_user(count, &request->count) + || __put_user(list, &request->list)) + return -EFAULT; + + err = drm_ioctl(file->f_dentry->d_inode, file, + DRM_IOCTL_MAP_BUFS, (unsigned long) request); + if (err) + return err; + + if (__get_user(actual, &request->count)) + return -EFAULT; + if (count >= actual) + for (i = 0; i < actual; ++i) + if (__copy_in_user(&list32[i], &list[i], + offsetof(drm_buf_pub_t, address)) + || __get_user(addr, &list[i].address) + || __put_user((unsigned long) addr, + &list32[i].address)) + return -EFAULT; + + if (__put_user(actual, &argp->count) + || __get_user(addr, &request->virtual) + || __put_user((unsigned long) addr, &argp->virtual)) + return -EFAULT; + + return 0; +} + +typedef struct drm_buf_free32 { + int count; + u32 list; +} drm_buf_free32_t; + +static int compat_drm_freebufs(struct file *file, unsigned int cmd, + unsigned long arg) +{ + drm_buf_free32_t req32; + drm_buf_free_t __user *request; + drm_buf_free32_t __user *argp = (void __user *)arg; + + if (copy_from_user(&req32, argp, sizeof(req32))) + return -EFAULT; + + request = compat_alloc_user_space(sizeof(*request)); + if (!access_ok(VERIFY_WRITE, request, sizeof(*request))) + return -EFAULT; + if (__put_user(req32.count, &request->count) + || __put_user((int __user *)(unsigned long) req32.list, + &request->list)) + return -EFAULT; + + return drm_ioctl(file->f_dentry->d_inode, file, + DRM_IOCTL_FREE_BUFS, (unsigned long) request); +} + +typedef struct drm_ctx_priv_map32 { + unsigned int ctx_id; /**< Context requesting private mapping */ + u32 handle; /**< Handle of map */ +} drm_ctx_priv_map32_t; + +static int compat_drm_setsareactx(struct file *file, unsigned int cmd, + unsigned long arg) +{ + drm_ctx_priv_map32_t req32; + drm_ctx_priv_map_t __user *request; + drm_ctx_priv_map32_t __user *argp = (void __user *)arg; + + if (copy_from_user(&req32, argp, sizeof(req32))) + return -EFAULT; + + request = compat_alloc_user_space(sizeof(*request)); + if (!access_ok(VERIFY_WRITE, request, sizeof(*request))) + return -EFAULT; + if (__put_user(req32.ctx_id, &request->ctx_id) + || __put_user((void *)(unsigned long) req32.handle, + &request->handle)) + return -EFAULT; + + return drm_ioctl(file->f_dentry->d_inode, file, + DRM_IOCTL_SET_SAREA_CTX, (unsigned long) request); +} + +static int compat_drm_getsareactx(struct file *file, unsigned int cmd, + unsigned long arg) +{ + drm_ctx_priv_map_t __user *request; + drm_ctx_priv_map32_t __user *argp = (void __user *)arg; + int err; + unsigned int ctx_id; + void *handle; + + if (!access_ok(VERIFY_WRITE, argp, sizeof(*argp)) + || __get_user(ctx_id, &argp->ctx_id)) + return -EFAULT; + + request = compat_alloc_user_space(sizeof(*request)); + if (!access_ok(VERIFY_WRITE, request, sizeof(*request))) + return -EFAULT; + if (__put_user(ctx_id, &request->ctx_id)) + return -EFAULT; + + err = drm_ioctl(file->f_dentry->d_inode, file, + DRM_IOCTL_GET_SAREA_CTX, (unsigned long) request); + if (err) + return err; + + if (__get_user(handle, &request->handle) + || __put_user((unsigned long) handle, &argp->handle)) + return -EFAULT; + + return 0; +} + +typedef struct drm_ctx_res32 { + int count; + u32 contexts; +} 
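/* the 'contexts' user pointer travels as a u32, like the other *32 mirrors in this file */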
drm_ctx_res32_t; + +static int compat_drm_resctx(struct file *file, unsigned int cmd, + unsigned long arg) +{ + drm_ctx_res32_t __user *argp = (void __user *)arg; + drm_ctx_res32_t res32; + drm_ctx_res_t __user *res; + int err; + + if (copy_from_user(&res32, argp, sizeof(res32))) + return -EFAULT; + + res = compat_alloc_user_space(sizeof(*res)); + if (!access_ok(VERIFY_WRITE, res, sizeof(*res))) + return -EFAULT; + if (__put_user(res32.count, &res->count) + || __put_user((drm_ctx_t __user *)(unsigned long) res32.contexts, + &res->contexts)) + return -EFAULT; + + err = drm_ioctl(file->f_dentry->d_inode, file, + DRM_IOCTL_RES_CTX, (unsigned long) res); + if (err) + return err; + + if (__get_user(res32.count, &res->count) + || __put_user(res32.count, &argp->count)) + return -EFAULT; + + return 0; +} + +typedef struct drm_dma32 { + int context; /**< Context handle */ + int send_count; /**< Number of buffers to send */ + u32 send_indices; /**< List of handles to buffers */ + u32 send_sizes; /**< Lengths of data to send */ + drm_dma_flags_t flags; /**< Flags */ + int request_count; /**< Number of buffers requested */ + int request_size; /**< Desired size for buffers */ + u32 request_indices; /**< Buffer information */ + u32 request_sizes; + int granted_count; /**< Number of buffers granted */ +} drm_dma32_t; + +static int compat_drm_dma(struct file *file, unsigned int cmd, + unsigned long arg) +{ + drm_dma32_t d32; + drm_dma32_t __user *argp = (void __user *) arg; + drm_dma_t __user *d; + int err; + + if (copy_from_user(&d32, argp, sizeof(d32))) + return -EFAULT; + + d = compat_alloc_user_space(sizeof(*d)); + if (!access_ok(VERIFY_WRITE, d, sizeof(*d))) + return -EFAULT; + + if (__put_user(d32.context, &d->context) + || __put_user(d32.send_count, &d->send_count) + || __put_user((int __user *)(unsigned long) d32.send_indices, + &d->send_indices) + || __put_user((int __user *)(unsigned long) d32.send_sizes, + &d->send_sizes) + || __put_user(d32.flags, &d->flags) + || __put_user(d32.request_count, &d->request_count) + || __put_user((int __user *)(unsigned long) d32.request_indices, + &d->request_indices) + || __put_user((int __user *)(unsigned long) d32.request_sizes, + &d->request_sizes)) + return -EFAULT; + + err = drm_ioctl(file->f_dentry->d_inode, file, + DRM_IOCTL_DMA, (unsigned long) d); + if (err) + return err; + + if (__get_user(d32.request_size, &d->request_size) + || __get_user(d32.granted_count, &d->granted_count) + || __put_user(d32.request_size, &argp->request_size) + || __put_user(d32.granted_count, &argp->granted_count)) + return -EFAULT; + + return 0; +} + +#if __OS_HAS_AGP +typedef struct drm_agp_mode32 { + u32 mode; /**< AGP mode */ +} drm_agp_mode32_t; + +static int compat_drm_agp_enable(struct file *file, unsigned int cmd, + unsigned long arg) +{ + drm_agp_mode32_t __user *argp = (void __user *)arg; + drm_agp_mode32_t m32; + drm_agp_mode_t __user *mode; + + if (get_user(m32.mode, &argp->mode)) + return -EFAULT; + + mode = compat_alloc_user_space(sizeof(*mode)); + if (put_user(m32.mode, &mode->mode)) + return -EFAULT; + + return drm_ioctl(file->f_dentry->d_inode, file, + DRM_IOCTL_AGP_ENABLE, (unsigned long) mode); +} + +typedef struct drm_agp_info32 { + int agp_version_major; + int agp_version_minor; + u32 mode; + u32 aperture_base; /* physical address */ + u32 aperture_size; /* bytes */ + u32 memory_allowed; /* bytes */ + u32 memory_used; + + /* PCI information */ + unsigned short id_vendor; + unsigned short id_device; +} drm_agp_info32_t; + +static int 
compat_drm_agp_info(struct file *file, unsigned int cmd, + unsigned long arg) +{ + drm_agp_info32_t __user *argp = (void __user *)arg; + drm_agp_info32_t i32; + drm_agp_info_t __user *info; + int err; + + info = compat_alloc_user_space(sizeof(*info)); + if (!access_ok(VERIFY_WRITE, info, sizeof(*info))) + return -EFAULT; + + err = drm_ioctl(file->f_dentry->d_inode, file, + DRM_IOCTL_AGP_INFO, (unsigned long) info); + if (err) + return err; + + if (__get_user(i32.agp_version_major, &info->agp_version_major) + || __get_user(i32.agp_version_minor, &info->agp_version_minor) + || __get_user(i32.mode, &info->mode) + || __get_user(i32.aperture_base, &info->aperture_base) + || __get_user(i32.aperture_size, &info->aperture_size) + || __get_user(i32.memory_allowed, &info->memory_allowed) + || __get_user(i32.memory_used, &info->memory_used) + || __get_user(i32.id_vendor, &info->id_vendor) + || __get_user(i32.id_device, &info->id_device)) + return -EFAULT; + + if (copy_to_user(argp, &i32, sizeof(i32))) + return -EFAULT; + + return 0; +} + +typedef struct drm_agp_buffer32 { + u32 size; /**< In bytes -- will round to page boundary */ + u32 handle; /**< Used for binding / unbinding */ + u32 type; /**< Type of memory to allocate */ + u32 physical; /**< Physical used by i810 */ +} drm_agp_buffer32_t; + +static int compat_drm_agp_alloc(struct file *file, unsigned int cmd, + unsigned long arg) +{ + drm_agp_buffer32_t __user *argp = (void __user *)arg; + drm_agp_buffer32_t req32; + drm_agp_buffer_t __user *request; + int err; + + if (copy_from_user(&req32, argp, sizeof(req32))) + return -EFAULT; + + request = compat_alloc_user_space(sizeof(*request)); + if (!access_ok(VERIFY_WRITE, request, sizeof(*request)) + || __put_user(req32.size, &request->size) + || __put_user(req32.type, &request->type)) + return -EFAULT; + + err = drm_ioctl(file->f_dentry->d_inode, file, + DRM_IOCTL_AGP_ALLOC, (unsigned long) request); + if (err) + return err; + + if (__get_user(req32.handle, &request->handle) + || __get_user(req32.physical, &request->physical) + || copy_to_user(argp, &req32, sizeof(req32))) { + drm_ioctl(file->f_dentry->d_inode, file, + DRM_IOCTL_AGP_FREE, (unsigned long) request); + return -EFAULT; + } + + return 0; +} + +static int compat_drm_agp_free(struct file *file, unsigned int cmd, + unsigned long arg) +{ + drm_agp_buffer32_t __user *argp = (void __user *)arg; + drm_agp_buffer_t __user *request; + u32 handle; + + request = compat_alloc_user_space(sizeof(*request)); + if (!access_ok(VERIFY_WRITE, request, sizeof(*request)) + || get_user(handle, &argp->handle) + || __put_user(handle, &request->handle)) + return -EFAULT; + + return drm_ioctl(file->f_dentry->d_inode, file, + DRM_IOCTL_AGP_FREE, (unsigned long) request); +} + +typedef struct drm_agp_binding32 { + u32 handle; /**< From drm_agp_buffer */ + u32 offset; /**< In bytes -- will round to page boundary */ +} drm_agp_binding32_t; + +static int compat_drm_agp_bind(struct file *file, unsigned int cmd, + unsigned long arg) +{ + drm_agp_binding32_t __user *argp = (void __user *)arg; + drm_agp_binding32_t req32; + drm_agp_binding_t __user *request; + + if (copy_from_user(&req32, argp, sizeof(req32))) + return -EFAULT; + + request = compat_alloc_user_space(sizeof(*request)); + if (!access_ok(VERIFY_WRITE, request, sizeof(*request)) + || __put_user(req32.handle, &request->handle) + || __put_user(req32.offset, &request->offset)) + return -EFAULT; + + return drm_ioctl(file->f_dentry->d_inode, file, + DRM_IOCTL_AGP_BIND, (unsigned long) request); +} + +static int 
compat_drm_agp_unbind(struct file *file, unsigned int cmd, + unsigned long arg) +{ + drm_agp_binding32_t __user *argp = (void __user *)arg; + drm_agp_binding_t __user *request; + u32 handle; + + request = compat_alloc_user_space(sizeof(*request)); + if (!access_ok(VERIFY_WRITE, request, sizeof(*request)) + || get_user(handle, &argp->handle) + || __put_user(handle, &request->handle)) + return -EFAULT; + + return drm_ioctl(file->f_dentry->d_inode, file, + DRM_IOCTL_AGP_UNBIND, (unsigned long) request); +} +#endif /* __OS_HAS_AGP */ + +typedef struct drm_scatter_gather32 { + u32 size; /**< In bytes -- will round to page boundary */ + u32 handle; /**< Used for mapping / unmapping */ +} drm_scatter_gather32_t; + +static int compat_drm_sg_alloc(struct file *file, unsigned int cmd, + unsigned long arg) +{ + drm_scatter_gather32_t __user *argp = (void __user *)arg; + drm_scatter_gather_t __user *request; + int err; + unsigned long x; + + request = compat_alloc_user_space(sizeof(*request)); + if (!access_ok(VERIFY_WRITE, request, sizeof(*request)) + || !access_ok(VERIFY_WRITE, argp, sizeof(*argp)) + || __get_user(x, &argp->size) + || __put_user(x, &request->size)) + return -EFAULT; + + err = drm_ioctl(file->f_dentry->d_inode, file, + DRM_IOCTL_SG_ALLOC, (unsigned long) request); + if (err) + return err; + + /* XXX not sure about the handle conversion here... */ + if (__get_user(x, &request->handle) + || __put_user(x >> PAGE_SHIFT, &argp->handle)) + return -EFAULT; + + return 0; +} + +static int compat_drm_sg_free(struct file *file, unsigned int cmd, + unsigned long arg) +{ + drm_scatter_gather32_t __user *argp = (void __user *)arg; + drm_scatter_gather_t __user *request; + unsigned long x; + + request = compat_alloc_user_space(sizeof(*request)); + if (!access_ok(VERIFY_WRITE, request, sizeof(*request)) + || !access_ok(VERIFY_WRITE, argp, sizeof(*argp)) + || __get_user(x, &argp->handle) + || __put_user(x << PAGE_SHIFT, &request->handle)) + return -EFAULT; + + return drm_ioctl(file->f_dentry->d_inode, file, + DRM_IOCTL_SG_FREE, (unsigned long) request); +} + +struct drm_wait_vblank_request32 { + drm_vblank_seq_type_t type; + unsigned int sequence; + u32 signal; +}; + +struct drm_wait_vblank_reply32 { + drm_vblank_seq_type_t type; + unsigned int sequence; + s32 tval_sec; + s32 tval_usec; +}; + +typedef union drm_wait_vblank32 { + struct drm_wait_vblank_request32 request; + struct drm_wait_vblank_reply32 reply; +} drm_wait_vblank32_t; + +static int compat_drm_wait_vblank(struct file *file, unsigned int cmd, + unsigned long arg) +{ + drm_wait_vblank32_t __user *argp = (void __user *)arg; + drm_wait_vblank32_t req32; + drm_wait_vblank_t __user *request; + int err; + + if (copy_from_user(&req32, argp, sizeof(req32))) + return -EFAULT; + + request = compat_alloc_user_space(sizeof(*request)); + if (!access_ok(VERIFY_WRITE, request, sizeof(*request)) + || __put_user(req32.request.type, &request->request.type) + || __put_user(req32.request.sequence, &request->request.sequence) + || __put_user(req32.request.signal, &request->request.signal)) + return -EFAULT; + + err = drm_ioctl(file->f_dentry->d_inode, file, + DRM_IOCTL_WAIT_VBLANK, (unsigned long) request); + if (err) + return err; + + if (__get_user(req32.reply.type, &request->reply.type) + || __get_user(req32.reply.sequence, &request->reply.sequence) + || __get_user(req32.reply.tval_sec, &request->reply.tval_sec) + || __get_user(req32.reply.tval_usec, &request->reply.tval_usec)) + return -EFAULT; + + if (copy_to_user(argp, &req32, sizeof(req32))) + return 
-EFAULT; + + return 0; +} + +drm_ioctl_compat_t *drm_compat_ioctls[] = { + [DRM_IOCTL_NR(DRM_IOCTL_VERSION32)] = compat_drm_version, + [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE32)] = compat_drm_getunique, + [DRM_IOCTL_NR(DRM_IOCTL_GET_MAP32)] = compat_drm_getmap, + [DRM_IOCTL_NR(DRM_IOCTL_GET_CLIENT32)] = compat_drm_getclient, + [DRM_IOCTL_NR(DRM_IOCTL_GET_STATS32)] = compat_drm_getstats, + [DRM_IOCTL_NR(DRM_IOCTL_SET_UNIQUE32)] = compat_drm_setunique, + [DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP32)] = compat_drm_addmap, + [DRM_IOCTL_NR(DRM_IOCTL_ADD_BUFS32)] = compat_drm_addbufs, + [DRM_IOCTL_NR(DRM_IOCTL_MARK_BUFS32)] = compat_drm_markbufs, + [DRM_IOCTL_NR(DRM_IOCTL_INFO_BUFS32)] = compat_drm_infobufs, + [DRM_IOCTL_NR(DRM_IOCTL_MAP_BUFS32)] = compat_drm_mapbufs, + [DRM_IOCTL_NR(DRM_IOCTL_FREE_BUFS32)] = compat_drm_freebufs, + [DRM_IOCTL_NR(DRM_IOCTL_RM_MAP32)] = compat_drm_rmmap, + [DRM_IOCTL_NR(DRM_IOCTL_SET_SAREA_CTX32)] = compat_drm_setsareactx, + [DRM_IOCTL_NR(DRM_IOCTL_GET_SAREA_CTX32)] = compat_drm_getsareactx, + [DRM_IOCTL_NR(DRM_IOCTL_RES_CTX32)] = compat_drm_resctx, + [DRM_IOCTL_NR(DRM_IOCTL_DMA32)] = compat_drm_dma, +#if __OS_HAS_AGP + [DRM_IOCTL_NR(DRM_IOCTL_AGP_ENABLE32)] = compat_drm_agp_enable, + [DRM_IOCTL_NR(DRM_IOCTL_AGP_INFO32)] = compat_drm_agp_info, + [DRM_IOCTL_NR(DRM_IOCTL_AGP_ALLOC32)] = compat_drm_agp_alloc, + [DRM_IOCTL_NR(DRM_IOCTL_AGP_FREE32)] = compat_drm_agp_free, + [DRM_IOCTL_NR(DRM_IOCTL_AGP_BIND32)] = compat_drm_agp_bind, + [DRM_IOCTL_NR(DRM_IOCTL_AGP_UNBIND32)] = compat_drm_agp_unbind, +#endif + [DRM_IOCTL_NR(DRM_IOCTL_SG_ALLOC32)] = compat_drm_sg_alloc, + [DRM_IOCTL_NR(DRM_IOCTL_SG_FREE32)] = compat_drm_sg_free, + [DRM_IOCTL_NR(DRM_IOCTL_WAIT_VBLANK32)] = compat_drm_wait_vblank, +}; + +/** + * Called whenever a 32-bit process running under a 64-bit kernel + * performs an ioctl on /dev/drm. + * + * \param filp file pointer. + * \param cmd command. + * \param arg user argument. + * \return zero on success or negative number on failure. + */ +long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) +{ + unsigned int nr = DRM_IOCTL_NR(cmd); + drm_ioctl_compat_t *fn; + int ret; + + if (nr >= DRM_ARRAY_SIZE(drm_compat_ioctls)) + return -ENOTTY; + + fn = drm_compat_ioctls[nr]; + + lock_kernel(); /* XXX for now */ + if (fn != NULL) + ret = (*fn)(filp, cmd, arg); + else + ret = drm_ioctl(filp->f_dentry->d_inode, filp, cmd, arg); + unlock_kernel(); + + return ret; +} +EXPORT_SYMBOL(drm_compat_ioctl); diff --git a/nx-X11/extras/drm/linux-core/drm_ioctl.c b/nx-X11/extras/drm/linux-core/drm_ioctl.c new file mode 100644 index 000000000..e0998d53d --- /dev/null +++ b/nx-X11/extras/drm/linux-core/drm_ioctl.c @@ -0,0 +1,368 @@ +/** + * \file drm_ioctl.c + * IOCTL processing for DRM + * + * \author Rickard E. (Rik) Faith <faith@valinux.com> + * \author Gareth Hughes <gareth@valinux.com> + */ + +/* + * Created: Fri Jan 8 09:01:26 1999 by faith@valinux.com + * + * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. + * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. + * All Rights Reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#include "drmP.h" +#include "drm_core.h" + +#include "linux/pci.h" + +/** + * Get the bus id. + * + * \param inode device inode. + * \param filp file pointer. + * \param cmd command. + * \param arg user argument, pointing to a drm_unique structure. + * \return zero on success or a negative number on failure. + * + * Copies the bus id from drm_device::unique into user space. + */ +int drm_getunique(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->head->dev; + drm_unique_t __user *argp = (void __user *)arg; + drm_unique_t u; + + if (copy_from_user(&u, argp, sizeof(u))) + return -EFAULT; + if (u.unique_len >= dev->unique_len) { + if (copy_to_user(u.unique, dev->unique, dev->unique_len)) + return -EFAULT; + } + u.unique_len = dev->unique_len; + if (copy_to_user(argp, &u, sizeof(u))) + return -EFAULT; + return 0; +} + +/** + * Set the bus id. + * + * \param inode device inode. + * \param filp file pointer. + * \param cmd command. + * \param arg user argument, pointing to a drm_unique structure. + * \return zero on success or a negative number on failure. + * + * Copies the bus id from userspace into drm_device::unique, and verifies that + * it matches the device this DRM is attached to (EINVAL otherwise). Deprecated + * in interface version 1.1 and will return EBUSY when setversion has requested + * version 1.1 or greater. 
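+ *
+ * An illustrative busid string in the form the sscanf() below accepts,
+ * with the PCI domain carried in the high byte of the bus field:
+ * \code
+ * "PCI:1:0:0"
+ * \endcode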
+ */ +int drm_setunique(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->head->dev; + drm_unique_t u; + int domain, bus, slot, func, ret; + + if (dev->unique_len || dev->unique) + return -EBUSY; + + if (copy_from_user(&u, (drm_unique_t __user *) arg, sizeof(u))) + return -EFAULT; + + if (!u.unique_len || u.unique_len > 1024) + return -EINVAL; + + dev->unique_len = u.unique_len; + dev->unique = drm_alloc(u.unique_len + 1, DRM_MEM_DRIVER); + if (!dev->unique) + return -ENOMEM; + if (copy_from_user(dev->unique, u.unique, dev->unique_len)) + return -EFAULT; + + dev->unique[dev->unique_len] = '\0'; + + dev->devname = drm_alloc(strlen(dev->driver->pci_driver.name) + strlen(dev->unique) + 2, + DRM_MEM_DRIVER); + if (!dev->devname) + return -ENOMEM; + + sprintf(dev->devname, "%s@%s", dev->driver->pci_driver.name, dev->unique); + + /* Return error if the busid submitted doesn't match the device's actual + * busid. + */ + ret = sscanf(dev->unique, "PCI:%d:%d:%d", &bus, &slot, &func); + if (ret != 3) + return DRM_ERR(EINVAL); + domain = bus >> 8; + bus &= 0xff; + + if ((domain != dev->pci_domain) || + (bus != dev->pci_bus) || + (slot != dev->pci_slot) || (func != dev->pci_func)) + return -EINVAL; + + return 0; +} + +static int drm_set_busid(drm_device_t * dev) +{ + int len; + if (dev->unique != NULL) + return EBUSY; + + dev->unique_len = 40; + dev->unique = drm_alloc(dev->unique_len + 1, DRM_MEM_DRIVER); + if (dev->unique == NULL) + return ENOMEM; + + len = snprintf(dev->unique, dev->unique_len, "pci:%04x:%02x:%02x.%d", + dev->pci_domain, dev->pci_bus, dev->pci_slot, dev->pci_func); + if (len > dev->unique_len) + DRM_ERROR("buffer overflow"); + + dev->devname = drm_alloc(strlen(dev->driver->pci_driver.name) + dev->unique_len + 2, + DRM_MEM_DRIVER); + if (dev->devname == NULL) + return ENOMEM; + + sprintf(dev->devname, "%s@%s", dev->driver->pci_driver.name, dev->unique); + + return 0; +} + +/** + * Get a mapping information. + * + * \param inode device inode. + * \param filp file pointer. + * \param cmd command. + * \param arg user argument, pointing to a drm_map structure. + * + * \return zero on success or a negative number on failure. + * + * Searches for the mapping with the specified offset and copies its information + * into userspace + */ +int drm_getmap(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->head->dev; + drm_map_t __user *argp = (void __user *)arg; + drm_map_t map; + drm_map_list_t *r_list = NULL; + struct list_head *list; + int idx; + int i; + + if (copy_from_user(&map, argp, sizeof(map))) + return -EFAULT; + idx = map.offset; + + down(&dev->struct_sem); + if (idx < 0) { + up(&dev->struct_sem); + return -EINVAL; + } + + i = 0; + list_for_each(list, &dev->maplist->head) { + if (i == idx) { + r_list = list_entry(list, drm_map_list_t, head); + break; + } + i++; + } + if (!r_list || !r_list->map) { + up(&dev->struct_sem); + return -EINVAL; + } + + map.offset = r_list->map->offset; + map.size = r_list->map->size; + map.type = r_list->map->type; + map.flags = r_list->map->flags; + map.handle = (void *)(unsigned long) r_list->user_token; + map.mtrr = r_list->map->mtrr; + up(&dev->struct_sem); + + if (copy_to_user(argp, &map, sizeof(map))) + return -EFAULT; + return 0; +} + +/** + * Get client information. + * + * \param inode device inode. + * \param filp file pointer. 
+ * \param cmd command. + * \param arg user argument, pointing to a drm_client structure. + * + * \return zero on success or a negative number on failure. + * + * Searches for the client with the specified index and copies its information + * into userspace + */ +int drm_getclient(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->head->dev; + drm_client_t __user *argp = (void __user *)arg; + drm_client_t client; + drm_file_t *pt; + int idx; + int i; + + if (copy_from_user(&client, argp, sizeof(client))) + return -EFAULT; + idx = client.idx; + down(&dev->struct_sem); + for (i = 0, pt = dev->file_first; i < idx && pt; i++, pt = pt->next) ; + + if (!pt) { + up(&dev->struct_sem); + return -EINVAL; + } + client.auth = pt->authenticated; + client.pid = pt->pid; + client.uid = pt->uid; + client.magic = pt->magic; + client.iocs = pt->ioctl_count; + up(&dev->struct_sem); + + if (copy_to_user(argp, &client, sizeof(client))) + return -EFAULT; + return 0; +} + +/** + * Get statistics information. + * + * \param inode device inode. + * \param filp file pointer. + * \param cmd command. + * \param arg user argument, pointing to a drm_stats structure. + * + * \return zero on success or a negative number on failure. + */ +int drm_getstats(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->head->dev; + drm_stats_t stats; + int i; + + memset(&stats, 0, sizeof(stats)); + + down(&dev->struct_sem); + + for (i = 0; i < dev->counters; i++) { + if (dev->types[i] == _DRM_STAT_LOCK) + stats.data[i].value + = (dev->lock.hw_lock ? dev->lock.hw_lock->lock : 0); + else + stats.data[i].value = atomic_read(&dev->counts[i]); + stats.data[i].type = dev->types[i]; + } + + stats.count = dev->counters; + + up(&dev->struct_sem); + + if (copy_to_user((drm_stats_t __user *) arg, &stats, sizeof(stats))) + return -EFAULT; + return 0; +} + +/** + * Setversion ioctl. + * + * \param inode device inode. + * \param filp file pointer. + * \param cmd command. + * \param arg user argument, pointing to a drm_lock structure. + * \return zero on success or negative number on failure. + * + * Sets the requested interface version + */ +int drm_setversion(DRM_IOCTL_ARGS) +{ + DRM_DEVICE; + drm_set_version_t sv; + drm_set_version_t retv; + int if_version; + drm_set_version_t __user *argp = (void __user *)data; + + DRM_COPY_FROM_USER_IOCTL(sv, argp, sizeof(sv)); + + retv.drm_di_major = DRM_IF_MAJOR; + retv.drm_di_minor = DRM_IF_MINOR; + retv.drm_dd_major = dev->driver->major; + retv.drm_dd_minor = dev->driver->minor; + + DRM_COPY_TO_USER_IOCTL(argp, retv, sizeof(sv)); + + if (sv.drm_di_major != -1) { + if (sv.drm_di_major != DRM_IF_MAJOR || + sv.drm_di_minor < 0 || sv.drm_di_minor > DRM_IF_MINOR) + return EINVAL; + if_version = DRM_IF_VERSION(sv.drm_di_major, sv.drm_di_minor); + dev->if_version = DRM_MAX(if_version, dev->if_version); + if (sv.drm_di_minor >= 1) { + /* + * Version 1.1 includes tying of DRM to specific device + */ + drm_set_busid(dev); + } + } + + if (sv.drm_dd_major != -1) { + if (sv.drm_dd_major != dev->driver->major || + sv.drm_dd_minor < 0 || sv.drm_dd_minor > dev->driver->minor) + return EINVAL; + + if (dev->driver->set_version) + dev->driver->set_version(dev, &sv); + } + return 0; +} + +/** No-op ioctl. 
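+ * Kept in the ioctl table so obsolete entry points still return success.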
*/ +int drm_noop(struct inode *inode, struct file *filp, unsigned int cmd, + unsigned long arg) +{ + DRM_DEBUG("\n"); + return 0; +} diff --git a/nx-X11/extras/drm/linux-core/drm_irq.c b/nx-X11/extras/drm/linux-core/drm_irq.c new file mode 100644 index 000000000..7f21205d5 --- /dev/null +++ b/nx-X11/extras/drm/linux-core/drm_irq.c @@ -0,0 +1,370 @@ +/** + * \file drm_irq.c + * IRQ support + * + * \author Rickard E. (Rik) Faith <faith@valinux.com> + * \author Gareth Hughes <gareth@valinux.com> + */ + +/* + * Created: Fri Mar 19 14:30:16 1999 by faith@valinux.com + * + * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas. + * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#include "drmP.h" + +#include <linux/interrupt.h> /* For task queue support */ + +/** + * Get interrupt from bus id. + * + * \param inode device inode. + * \param filp file pointer. + * \param cmd command. + * \param arg user argument, pointing to a drm_irq_busid structure. + * \return zero on success or a negative number on failure. + * + * Finds the PCI device with the specified bus id and gets its IRQ number. + * This IOCTL is deprecated, and will now return EINVAL for any busid not equal + * to that of the device that this DRM instance attached to. + */ +int drm_irq_by_busid(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->head->dev; + drm_irq_busid_t __user *argp = (void __user *)arg; + drm_irq_busid_t p; + + if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ)) + return -EINVAL; + + if (copy_from_user(&p, argp, sizeof(p))) + return -EFAULT; + + if ((p.busnum >> 8) != dev->pci_domain || + (p.busnum & 0xff) != dev->pci_bus || + p.devnum != dev->pci_slot || p.funcnum != dev->pci_func) + return -EINVAL; + + p.irq = dev->irq; + + DRM_DEBUG("%d:%d:%d => IRQ %d\n", p.busnum, p.devnum, p.funcnum, p.irq); + if (copy_to_user(argp, &p, sizeof(p))) + return -EFAULT; + return 0; +} + +/** + * Install IRQ handler. + * + * \param dev DRM device. + * + * Initializes the IRQ related data, and setups drm_device::vbl_queue. Installs the handler, calling the driver + * \c drm_driver_irq_preinstall() and \c drm_driver_irq_postinstall() functions + * before and after the installation. 
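+ *
+ * Reached through the DRM_IOCTL_CONTROL ioctl; e.g., from user space
+ * (illustrative, assuming an open fd and the device's IRQ number in irq):
+ * \code
+ * drm_control_t ctl = { .func = DRM_INST_HANDLER, .irq = irq };
+ * ioctl(fd, DRM_IOCTL_CONTROL, &ctl);
+ * \endcode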
+ */ +static int drm_irq_install(drm_device_t * dev) +{ + int ret; + unsigned long sh_flags = 0; + + if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ)) + return -EINVAL; + + if (dev->irq == 0) + return -EINVAL; + + down(&dev->struct_sem); + + /* Driver must have been initialized */ + if (!dev->dev_private) { + up(&dev->struct_sem); + return -EINVAL; + } + + if (dev->irq_enabled) { + up(&dev->struct_sem); + return -EBUSY; + } + dev->irq_enabled = 1; + up(&dev->struct_sem); + + DRM_DEBUG("%s: irq=%d\n", __FUNCTION__, dev->irq); + + if (drm_core_check_feature(dev, DRIVER_IRQ_VBL)) { + init_waitqueue_head(&dev->vbl_queue); + + spin_lock_init(&dev->vbl_lock); + + INIT_LIST_HEAD(&dev->vbl_sigs.head); + + dev->vbl_pending = 0; + } + + /* Before installing handler */ + dev->driver->irq_preinstall(dev); + + /* Install handler */ + if (drm_core_check_feature(dev, DRIVER_IRQ_SHARED)) + sh_flags = SA_SHIRQ; + + ret = request_irq(dev->irq, dev->driver->irq_handler, + sh_flags, dev->devname, dev); + if (ret < 0) { + down(&dev->struct_sem); + dev->irq_enabled = 0; + up(&dev->struct_sem); + return ret; + } + + /* After installing handler */ + dev->driver->irq_postinstall(dev); + + return 0; +} + +/** + * Uninstall the IRQ handler. + * + * \param dev DRM device. + * + * Calls the driver's \c drm_driver_irq_uninstall() function, and stops the irq. + */ +int drm_irq_uninstall(drm_device_t * dev) +{ + int irq_enabled; + + if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ)) + return -EINVAL; + + down(&dev->struct_sem); + irq_enabled = dev->irq_enabled; + dev->irq_enabled = 0; + up(&dev->struct_sem); + + if (!irq_enabled) + return -EINVAL; + + DRM_DEBUG("%s: irq=%d\n", __FUNCTION__, dev->irq); + + dev->driver->irq_uninstall(dev); + + free_irq(dev->irq, dev); + + return 0; +} +EXPORT_SYMBOL(drm_irq_uninstall); + +/** + * IRQ control ioctl. + * + * \param inode device inode. + * \param filp file pointer. + * \param cmd command. + * \param arg user argument, pointing to a drm_control structure. + * \return zero on success or a negative number on failure. + * + * Calls irq_install() or irq_uninstall() according to \p arg. + */ +int drm_control(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->head->dev; + drm_control_t ctl; + + /* if we haven't irq we fallback for compatibility reasons - this used to be a separate function in drm_dma.h */ + + if (copy_from_user(&ctl, (drm_control_t __user *) arg, sizeof(ctl))) + return -EFAULT; + + switch (ctl.func) { + case DRM_INST_HANDLER: + if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ)) + return 0; + if (dev->if_version < DRM_IF_VERSION(1, 2) && + ctl.irq != dev->irq) + return -EINVAL; + return drm_irq_install(dev); + case DRM_UNINST_HANDLER: + if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ)) + return 0; + return drm_irq_uninstall(dev); + default: + return -EINVAL; + } +} + +/** + * Wait for VBLANK. + * + * \param inode device inode. + * \param filp file pointer.rm. + * \param cmd command. + * \param data user argument, pointing to a drm_wait_vblank structure. + * \return zero on success or a negative number on failure. + * + * Verifies the IRQ is installed + * + * If a signal is requested checks if this task has already scheduled the same signal + * for the same vblank sequence number - nothing to be done in + * that case. If the number of tasks waiting for the interrupt exceeds 100 the + * function fails. 
Otherwise adds a new entry to drm_device::vbl_sigs for this + * task. + * + * If a signal is not requested, then calls vblank_wait(). + */ +int drm_wait_vblank(DRM_IOCTL_ARGS) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->head->dev; + drm_wait_vblank_t __user *argp = (void __user *)data; + drm_wait_vblank_t vblwait; + struct timeval now; + int ret = 0; + unsigned int flags; + + if (!drm_core_check_feature(dev, DRIVER_IRQ_VBL)) + return -EINVAL; + + if ((!dev->irq) || (!dev->irq_enabled)) + return -EINVAL; + + DRM_COPY_FROM_USER_IOCTL(vblwait, argp, sizeof(vblwait)); + + switch (vblwait.request.type & ~_DRM_VBLANK_FLAGS_MASK) { + case _DRM_VBLANK_RELATIVE: + vblwait.request.sequence += atomic_read(&dev->vbl_received); + vblwait.request.type &= ~_DRM_VBLANK_RELATIVE; + case _DRM_VBLANK_ABSOLUTE: + break; + default: + return -EINVAL; + } + + flags = vblwait.request.type & _DRM_VBLANK_FLAGS_MASK; + + if (flags & _DRM_VBLANK_SIGNAL) { + unsigned long irqflags; + drm_vbl_sig_t *vbl_sig; + + vblwait.reply.sequence = atomic_read(&dev->vbl_received); + + spin_lock_irqsave(&dev->vbl_lock, irqflags); + + /* Check if this task has already scheduled the same signal + * for the same vblank sequence number; nothing to be done in + * that case + */ + list_for_each_entry(vbl_sig, &dev->vbl_sigs.head, head) { + if (vbl_sig->sequence == vblwait.request.sequence + && vbl_sig->info.si_signo == vblwait.request.signal + && vbl_sig->task == current) { + spin_unlock_irqrestore(&dev->vbl_lock, + irqflags); + goto done; + } + } + + if (dev->vbl_pending >= 100) { + spin_unlock_irqrestore(&dev->vbl_lock, irqflags); + return -EBUSY; + } + + dev->vbl_pending++; + + spin_unlock_irqrestore(&dev->vbl_lock, irqflags); + + if (! + (vbl_sig = + drm_alloc(sizeof(drm_vbl_sig_t), DRM_MEM_DRIVER))) { + return -ENOMEM; + } + + memset((void *)vbl_sig, 0, sizeof(*vbl_sig)); + + vbl_sig->sequence = vblwait.request.sequence; + vbl_sig->info.si_signo = vblwait.request.signal; + vbl_sig->task = current; + + spin_lock_irqsave(&dev->vbl_lock, irqflags); + + list_add_tail((struct list_head *)vbl_sig, &dev->vbl_sigs.head); + + spin_unlock_irqrestore(&dev->vbl_lock, irqflags); + } else { + if (dev->driver->vblank_wait) + ret = + dev->driver->vblank_wait(dev, + &vblwait.request.sequence); + + do_gettimeofday(&now); + vblwait.reply.tval_sec = now.tv_sec; + vblwait.reply.tval_usec = now.tv_usec; + } + + done: + DRM_COPY_TO_USER_IOCTL(argp, vblwait, sizeof(vblwait)); + + return ret; +} + +/** + * Send the VBLANK signals. + * + * \param dev DRM device. + * + * Sends a signal for each task in drm_device::vbl_sigs and empties the list. + * + * If a signal is not requested, then calls vblank_wait(). 
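+ *
+ * The sequence comparison below is modular: an entry counts as due once it
+ * is at most 2^23 steps behind the current counter, so pending signals
+ * survive 32-bit wrap-around of drm_device::vbl_received.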
+ */
+void drm_vbl_send_signals(drm_device_t * dev)
+{
+	struct list_head *list, *tmp;
+	drm_vbl_sig_t *vbl_sig;
+	unsigned int vbl_seq = atomic_read(&dev->vbl_received);
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->vbl_lock, flags);
+
+	list_for_each_safe(list, tmp, &dev->vbl_sigs.head) {
+		vbl_sig = list_entry(list, drm_vbl_sig_t, head);
+		if ((vbl_seq - vbl_sig->sequence) <= (1 << 23)) {
+			vbl_sig->info.si_code = vbl_seq;
+			send_sig_info(vbl_sig->info.si_signo, &vbl_sig->info,
+				      vbl_sig->task);
+
+			list_del(list);
+
+			drm_free(vbl_sig, sizeof(*vbl_sig), DRM_MEM_DRIVER);
+
+			dev->vbl_pending--;
+		}
+	}
+
+	spin_unlock_irqrestore(&dev->vbl_lock, flags);
+}
+EXPORT_SYMBOL(drm_vbl_send_signals);
diff --git a/nx-X11/extras/drm/linux-core/drm_lock.c b/nx-X11/extras/drm/linux-core/drm_lock.c
new file mode 100644
index 000000000..5557f30fa
--- /dev/null
+++ b/nx-X11/extras/drm/linux-core/drm_lock.c
@@ -0,0 +1,305 @@
+/**
+ * \file drm_lock.c
+ * IOCTLs for locking
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
+ */
+
+/*
+ * Created: Tue Feb 2 08:37:54 1999 by faith@valinux.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "drmP.h"
+
+static int drm_lock_transfer(drm_device_t * dev,
+			     __volatile__ unsigned int *lock,
+			     unsigned int context);
+static int drm_notifier(void *priv);
+
+/**
+ * Lock ioctl.
+ *
+ * \param inode device inode.
+ * \param filp file pointer.
+ * \param cmd command.
+ * \param arg user argument, pointing to a drm_lock structure.
+ * \return zero on success or negative number on failure.
+ *
+ * Add the current task to the lock wait queue, and attempt to take the lock.
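+ *
+ * From user space the heavyweight lock is taken and released through the
+ * ioctl interface. An illustrative sequence (error handling omitted; the
+ * context handle ctx is assumed to come from a prior DRM_IOCTL_ADD_CTX):
+ *
+ * \code
+ * drm_lock_t lock;
+ *
+ * memset(&lock, 0, sizeof(lock));
+ * lock.context = ctx;
+ * lock.flags = _DRM_LOCK_READY;
+ * ioctl(fd, DRM_IOCTL_LOCK, &lock);	// may sleep until the lock is free
+ * // ... program the hardware ...
+ * ioctl(fd, DRM_IOCTL_UNLOCK, &lock);
+ * \endcode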
+ */ +int drm_lock(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->head->dev; + DECLARE_WAITQUEUE(entry, current); + drm_lock_t lock; + int ret = 0; + + ++priv->lock_count; + + if (copy_from_user(&lock, (drm_lock_t __user *) arg, sizeof(lock))) + return -EFAULT; + + if (lock.context == DRM_KERNEL_CONTEXT) { + DRM_ERROR("Process %d using kernel context %d\n", + current->pid, lock.context); + return -EINVAL; + } + + DRM_DEBUG("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n", + lock.context, current->pid, + dev->lock.hw_lock->lock, lock.flags); + + if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE)) + if (lock.context < 0) + return -EINVAL; + + add_wait_queue(&dev->lock.lock_queue, &entry); + for (;;) { + current->state = TASK_INTERRUPTIBLE; + if (!dev->lock.hw_lock) { + /* Device has been unregistered */ + ret = -EINTR; + break; + } + if (drm_lock_take(&dev->lock.hw_lock->lock, lock.context)) { + dev->lock.filp = filp; + dev->lock.lock_time = jiffies; + atomic_inc(&dev->counts[_DRM_STAT_LOCKS]); + break; /* Got lock */ + } + + /* Contention */ + schedule(); + if (signal_pending(current)) { + ret = -ERESTARTSYS; + break; + } + } + current->state = TASK_RUNNING; + remove_wait_queue(&dev->lock.lock_queue, &entry); + + DRM_DEBUG( "%d %s\n", lock.context, ret ? "interrupted" : "has lock" ); + if (ret) return ret; + + sigemptyset(&dev->sigmask); + sigaddset(&dev->sigmask, SIGSTOP); + sigaddset(&dev->sigmask, SIGTSTP); + sigaddset(&dev->sigmask, SIGTTIN); + sigaddset(&dev->sigmask, SIGTTOU); + dev->sigdata.context = lock.context; + dev->sigdata.lock = dev->lock.hw_lock; + block_all_signals(drm_notifier, &dev->sigdata, &dev->sigmask); + + if (dev->driver->dma_ready && (lock.flags & _DRM_LOCK_READY)) + dev->driver->dma_ready(dev); + + if (dev->driver->dma_quiescent && (lock.flags & _DRM_LOCK_QUIESCENT)) { + if (dev->driver->dma_quiescent(dev)) { + DRM_DEBUG( "%d waiting for DMA quiescent\n", lock.context); + return DRM_ERR(EBUSY); + } + } + + if (dev->driver->kernel_context_switch + && dev->last_context != lock.context) { + dev->driver->kernel_context_switch(dev, dev->last_context, + lock.context); + } + + return 0; +} + +/** + * Unlock ioctl. + * + * \param inode device inode. + * \param filp file pointer. + * \param cmd command. + * \param arg user argument, pointing to a drm_lock structure. + * \return zero on success or negative number on failure. + * + * Transfer and free the lock. + */ +int drm_unlock(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->head->dev; + drm_lock_t lock; + + if (copy_from_user(&lock, (drm_lock_t __user *) arg, sizeof(lock))) + return -EFAULT; + + if (lock.context == DRM_KERNEL_CONTEXT) { + DRM_ERROR("Process %d using kernel context %d\n", + current->pid, lock.context); + return -EINVAL; + } + + atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]); + + if (dev->driver->kernel_context_switch_unlock) + dev->driver->kernel_context_switch_unlock(dev); + else { + drm_lock_transfer(dev, &dev->lock.hw_lock->lock, + DRM_KERNEL_CONTEXT); + + if (drm_lock_free(dev, &dev->lock.hw_lock->lock, + DRM_KERNEL_CONTEXT)) { + DRM_ERROR("\n"); + } + } + + unblock_all_signals(); + return 0; +} + +/** + * Take the heavyweight lock. + * + * \param lock lock pointer. + * \param context locking context. + * \return one if the lock is held, or zero otherwise. 
+ * + * Attempt to mark the lock as held by the given context, via the \p cmpxchg instruction. + */ +int drm_lock_take(__volatile__ unsigned int *lock, unsigned int context) +{ + unsigned int old, new, prev; + + do { + old = *lock; + if (old & _DRM_LOCK_HELD) + new = old | _DRM_LOCK_CONT; + else + new = context | _DRM_LOCK_HELD; + prev = cmpxchg(lock, old, new); + } while (prev != old); + if (_DRM_LOCKING_CONTEXT(old) == context) { + if (old & _DRM_LOCK_HELD) { + if (context != DRM_KERNEL_CONTEXT) { + DRM_ERROR("%d holds heavyweight lock\n", + context); + } + return 0; + } + } + if (new == (context | _DRM_LOCK_HELD)) { + /* Have lock */ + return 1; + } + return 0; +} + +/** + * This takes a lock forcibly and hands it to context. Should ONLY be used + * inside *_unlock to give lock to kernel before calling *_dma_schedule. + * + * \param dev DRM device. + * \param lock lock pointer. + * \param context locking context. + * \return always one. + * + * Resets the lock file pointer. + * Marks the lock as held by the given context, via the \p cmpxchg instruction. + */ +static int drm_lock_transfer(drm_device_t * dev, + __volatile__ unsigned int *lock, unsigned int context) +{ + unsigned int old, new, prev; + + dev->lock.filp = NULL; + do { + old = *lock; + new = context | _DRM_LOCK_HELD; + prev = cmpxchg(lock, old, new); + } while (prev != old); + return 1; +} + +/** + * Free lock. + * + * \param dev DRM device. + * \param lock lock. + * \param context context. + * + * Resets the lock file pointer. + * Marks the lock as not held, via the \p cmpxchg instruction. Wakes any task + * waiting on the lock queue. + */ +int drm_lock_free(drm_device_t * dev, + __volatile__ unsigned int *lock, unsigned int context) +{ + unsigned int old, new, prev; + + dev->lock.filp = NULL; + do { + old = *lock; + new = 0; + prev = cmpxchg(lock, old, new); + } while (prev != old); + if (_DRM_LOCK_IS_HELD(old) && _DRM_LOCKING_CONTEXT(old) != context) { + DRM_ERROR("%d freed heavyweight lock held by %d\n", + context, _DRM_LOCKING_CONTEXT(old)); + return 1; + } + wake_up_interruptible(&dev->lock.lock_queue); + return 0; +} + +/** + * If we get here, it means that the process has called DRM_IOCTL_LOCK + * without calling DRM_IOCTL_UNLOCK. + * + * If the lock is not held, then let the signal proceed as usual. If the lock + * is held, then set the contended flag and keep the signal blocked. + * + * \param priv pointer to a drm_sigdata structure. + * \return one if the signal should be delivered normally, or zero if the + * signal should be blocked. + */ +static int drm_notifier(void *priv) +{ + drm_sigdata_t *s = (drm_sigdata_t *) priv; + unsigned int old, new, prev; + + /* Allow signal delivery if lock isn't held */ + if (!s->lock || !_DRM_LOCK_IS_HELD(s->lock->lock) + || _DRM_LOCKING_CONTEXT(s->lock->lock) != s->context) + return 1; + + /* Otherwise, set flag to force call to + drmUnlock */ + do { + old = s->lock->lock; + new = old | _DRM_LOCK_CONT; + prev = cmpxchg(&s->lock->lock, old, new); + } while (prev != old); + return 0; +} diff --git a/nx-X11/extras/drm/linux-core/drm_memory.c b/nx-X11/extras/drm/linux-core/drm_memory.c new file mode 100644 index 000000000..9125cd475 --- /dev/null +++ b/nx-X11/extras/drm/linux-core/drm_memory.c @@ -0,0 +1,184 @@ +/** + * \file drm_memory.c + * Memory management wrappers for DRM + * + * \author Rickard E. 
(Rik) Faith <faith@valinux.com> + * \author Gareth Hughes <gareth@valinux.com> + */ + +/* + * Created: Thu Feb 4 14:00:34 1999 by faith@valinux.com + * + * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. + * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#include <linux/config.h> +#include <linux/highmem.h> +#include "drmP.h" + +#ifndef DEBUG_MEMORY + +/** No-op. */ +void drm_mem_init(void) +{ +} + +/** + * Called when "/proc/dri/%dev%/mem" is read. + * + * \param buf output buffer. + * \param start start of output data. + * \param offset requested start offset. + * \param len requested number of bytes. + * \param eof whether there is no more data to return. + * \param data private data. + * \return number of written bytes. + * + * No-op. + */ +int drm_mem_info(char *buf, char **start, off_t offset, + int len, int *eof, void *data) +{ + return 0; +} + +/** Wrapper around kmalloc() */ +void *drm_calloc(size_t nmemb, size_t size, int area) +{ + void *addr; + + addr = kmalloc(size * nmemb, GFP_KERNEL); + if (addr != NULL) + memset((void *)addr, 0, size * nmemb); + + return addr; +} +EXPORT_SYMBOL(drm_calloc); + +/** Wrapper around kmalloc() and kfree() */ +void *drm_realloc(void *oldpt, size_t oldsize, size_t size, int area) +{ + void *pt; + + if (!(pt = kmalloc(size, GFP_KERNEL))) + return NULL; + if (oldpt && oldsize) { + memcpy(pt, oldpt, oldsize); + kfree(oldpt); + } + return pt; +} + +/** + * Allocate pages. + * + * \param order size order. + * \param area memory area. (Not used.) + * \return page address on success, or zero on failure. + * + * Allocate and reserve free pages. + */ +unsigned long drm_alloc_pages(int order, int area) +{ + unsigned long address; + unsigned long bytes = PAGE_SIZE << order; + unsigned long addr; + unsigned int sz; + + address = __get_free_pages(GFP_KERNEL, order); + if (!address) + return 0; + + /* Zero */ + memset((void *)address, 0, bytes); + + /* Reserve */ + for (addr = address, sz = bytes; + sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) { + SetPageReserved(virt_to_page(addr)); + } + + return address; +} + +/** + * Free pages. + * + * \param address address of the pages to free. + * \param order size order. + * \param area memory area. (Not used.) + * + * Unreserve and free pages allocated by alloc_pages(). 
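+ *
+ * The order passed here must match the one used at allocation time. A
+ * sketch of a matched pair, mirroring how drm_bufs.c sizes DMA segments
+ * (drm_order() rounds a byte count up to a power-of-two order):
+ *
+ * \code
+ * int order = drm_order(len);	// 1 << order >= len, in bytes
+ * int page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
+ * unsigned long addr = drm_alloc_pages(page_order, DRM_MEM_DMA);
+ * if (!addr)
+ *	return -ENOMEM;
+ * // ... use the buffer ...
+ * drm_free_pages(addr, page_order, DRM_MEM_DMA);	// same order both sides
+ * \endcode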
+ */ +void drm_free_pages(unsigned long address, int order, int area) +{ + unsigned long bytes = PAGE_SIZE << order; + unsigned long addr; + unsigned int sz; + + if (!address) + return; + + /* Unreserve */ + for (addr = address, sz = bytes; + sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) { + ClearPageReserved(virt_to_page(addr)); + } + + free_pages(address, order); +} + +#if __OS_HAS_AGP +/** Wrapper around agp_allocate_memory() */ +#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,11) +DRM_AGP_MEM *drm_alloc_agp(drm_device_t *dev, int pages, u32 type) +{ + return drm_agp_allocate_memory(pages, type); +} +#else +DRM_AGP_MEM *drm_alloc_agp(drm_device_t *dev, int pages, u32 type) +{ + return drm_agp_allocate_memory(dev->agp->bridge, pages, type); +} +#endif + +/** Wrapper around agp_free_memory() */ +int drm_free_agp(DRM_AGP_MEM * handle, int pages) +{ + return drm_agp_free_memory(handle) ? 0 : -EINVAL; +} + +/** Wrapper around agp_bind_memory() */ +int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start) +{ + return drm_agp_bind_memory(handle, start); +} + +/** Wrapper around agp_unbind_memory() */ +int drm_unbind_agp(DRM_AGP_MEM * handle) +{ + return drm_agp_unbind_memory(handle); +} +#endif /* agp */ +#endif /* debug_memory */ diff --git a/nx-X11/extras/drm/linux-core/drm_memory.h b/nx-X11/extras/drm/linux-core/drm_memory.h new file mode 100644 index 000000000..4a4fd5c3c --- /dev/null +++ b/nx-X11/extras/drm/linux-core/drm_memory.h @@ -0,0 +1,234 @@ +/** + * \file drm_memory.h + * Memory management wrappers for DRM + * + * \author Rickard E. (Rik) Faith <faith@valinux.com> + * \author Gareth Hughes <gareth@valinux.com> + */ + +/* + * Created: Thu Feb 4 14:00:34 1999 by faith@valinux.com + * + * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. + * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#include <linux/config.h> +#include <linux/highmem.h> +#include <linux/vmalloc.h> +#include "drmP.h" + +/** + * Cut down version of drm_memory_debug.h, which used to be called + * drm_memory.h. + */ + +/* Need the 4-argument version of vmap(). 
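+ * (That is, vmap(struct page **pages, unsigned int count, unsigned long
+ * flags, pgprot_t prot); older kernels exported a shorter variant, hence
+ * the VMAP_4_ARGS compatibility guard below.)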
*/ +#if __OS_HAS_AGP && defined(VMAP_4_ARGS) + +#include <linux/vmalloc.h> + +#ifdef HAVE_PAGE_AGP +#include <asm/agp.h> +#else +# ifdef __powerpc__ +# define PAGE_AGP __pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE) +# else +# define PAGE_AGP PAGE_KERNEL +# endif +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0) +#ifndef pte_offset_kernel +# define pte_offset_kernel(dir, address) pte_offset(dir, address) +#endif +#ifndef pte_pfn +# define pte_pfn(pte) (pte_page(pte) - mem_map) +#endif +#ifndef pfn_to_page +# define pfn_to_page(pfn) (mem_map + (pfn)) +#endif +#endif + +/* + * Find the drm_map that covers the range [offset, offset+size). + */ +static inline drm_map_t *drm_lookup_map(unsigned long offset, + unsigned long size, drm_device_t * dev) +{ + struct list_head *list; + drm_map_list_t *r_list; + drm_map_t *map; + + list_for_each(list, &dev->maplist->head) { + r_list = (drm_map_list_t *) list; + map = r_list->map; + if (!map) + continue; + if (map->offset <= offset + && (offset + size) <= (map->offset + map->size)) + return map; + } + return NULL; +} + +static inline void *agp_remap(unsigned long offset, unsigned long size, + drm_device_t * dev) +{ + unsigned long *phys_addr_map, i, num_pages = + PAGE_ALIGN(size) / PAGE_SIZE; + struct drm_agp_mem *agpmem; + struct page **page_map; + void *addr; + + size = PAGE_ALIGN(size); + +#ifdef __alpha__ + offset -= dev->hose->mem_space->start; +#endif + + for (agpmem = dev->agp->memory; agpmem; agpmem = agpmem->next) + if (agpmem->bound <= offset + && (agpmem->bound + (agpmem->pages << PAGE_SHIFT)) >= + (offset + size)) + break; + if (!agpmem) + return NULL; + + /* + * OK, we're mapping AGP space on a chipset/platform on which memory accesses by + * the CPU do not get remapped by the GART. We fix this by using the kernel's + * page-table instead (that's probably faster anyhow...). + */ + /* note: use vmalloc() because num_pages could be large... 
*/ + page_map = vmalloc(num_pages * sizeof(struct page *)); + if (!page_map) + return NULL; + + phys_addr_map = + agpmem->memory->memory + (offset - agpmem->bound) / PAGE_SIZE; + for (i = 0; i < num_pages; ++i) + page_map[i] = pfn_to_page(phys_addr_map[i] >> PAGE_SHIFT); + addr = vmap(page_map, num_pages, VM_IOREMAP, PAGE_AGP); + vfree(page_map); + + return addr; +} + +static inline unsigned long drm_follow_page(void *vaddr) +{ + pgd_t *pgd = pgd_offset_k((unsigned long) vaddr); +#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,10) + pmd_t *pmd = pmd_offset(pgd, (unsigned long)vaddr); +#else + pud_t *pud = pud_offset(pgd, (unsigned long) vaddr); + pmd_t *pmd = pmd_offset(pud, (unsigned long) vaddr); +#endif + pte_t *ptep = pte_offset_kernel(pmd, (unsigned long) vaddr); + return pte_pfn(*ptep) << PAGE_SHIFT; +} + +#else /* __OS_HAS_AGP */ + +static inline drm_map_t *drm_lookup_map(unsigned long offset, + unsigned long size, drm_device_t * dev) +{ + return NULL; +} + +static inline void *agp_remap(unsigned long offset, unsigned long size, + drm_device_t * dev) +{ + return NULL; +} + +static inline unsigned long drm_follow_page(void *vaddr) +{ + return 0; +} +#endif + +#ifndef DEBUG_MEMORY +static inline void *drm_ioremap(unsigned long offset, unsigned long size, + drm_device_t * dev) +{ +#if defined(VMAP_4_ARGS) + if (drm_core_has_AGP(dev) && dev->agp && dev->agp->cant_use_aperture) { + drm_map_t *map = drm_lookup_map(offset, size, dev); + + if (map && map->type == _DRM_AGP) + return agp_remap(offset, size, dev); + } +#endif + + return ioremap(offset, size); +} + +static inline void *drm_ioremap_nocache(unsigned long offset, + unsigned long size, drm_device_t * dev) +{ +#if defined(VMAP_4_ARGS) + if (drm_core_has_AGP(dev) && dev->agp && dev->agp->cant_use_aperture) { + drm_map_t *map = drm_lookup_map(offset, size, dev); + + if (map && map->type == _DRM_AGP) + return agp_remap(offset, size, dev); + } +#endif + + return ioremap_nocache(offset, size); +} + +static inline void drm_ioremapfree(void *pt, unsigned long size, + drm_device_t * dev) +{ +#if defined(VMAP_4_ARGS) + /* + * This is a bit ugly. It would be much cleaner if the DRM API would use separate + * routines for handling mappings in the AGP space. Hopefully this can be done in + * a future revision of the interface... + */ + if (drm_core_has_AGP(dev) && dev->agp && dev->agp->cant_use_aperture + && ((unsigned long)pt >= VMALLOC_START + && (unsigned long)pt < VMALLOC_END)) { + unsigned long offset; + drm_map_t *map; + + offset = drm_follow_page(pt) | ((unsigned long)pt & ~PAGE_MASK); + map = drm_lookup_map(offset, size, dev); + if (map && map->type == _DRM_AGP) { + vunmap(pt); + return; + } + } +#endif + iounmap(pt); +} +#else +extern void *drm_ioremap(unsigned long offset, unsigned long size, + drm_device_t * dev); +extern void *drm_ioremap_nocache(unsigned long offset, + unsigned long size, drm_device_t * dev); +extern void drm_ioremapfree(void *pt, unsigned long size, + drm_device_t * dev); +#endif diff --git a/nx-X11/extras/drm/linux-core/drm_memory_debug.c b/nx-X11/extras/drm/linux-core/drm_memory_debug.c new file mode 100644 index 000000000..2fe7aeaac --- /dev/null +++ b/nx-X11/extras/drm/linux-core/drm_memory_debug.c @@ -0,0 +1,477 @@ +/** + * \file drm_memory_debug.c + * Memory management wrappers for DRM. + * + * \author Rickard E. (Rik) Faith <faith@valinux.com> + * \author Gareth Hughes <gareth@valinux.com> + */ + +/* + * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. 
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#include <linux/config.h> +#include "drmP.h" + +#ifdef DEBUG_MEMORY + +typedef struct drm_mem_stats { + const char *name; + int succeed_count; + int free_count; + int fail_count; + unsigned long bytes_allocated; + unsigned long bytes_freed; +} drm_mem_stats_t; + +static spinlock_t drm_mem_lock = SPIN_LOCK_UNLOCKED; +static unsigned long drm_ram_available = 0; /* In pages */ +static unsigned long drm_ram_used = 0; +static drm_mem_stats_t drm_mem_stats[] = { + [DRM_MEM_DMA] = {"dmabufs"}, + [DRM_MEM_SAREA] = {"sareas"}, + [DRM_MEM_DRIVER] = {"driver"}, + [DRM_MEM_MAGIC] = {"magic"}, + [DRM_MEM_IOCTLS] = {"ioctltab"}, + [DRM_MEM_MAPS] = {"maplist"}, + [DRM_MEM_VMAS] = {"vmalist"}, + [DRM_MEM_BUFS] = {"buflist"}, + [DRM_MEM_SEGS] = {"seglist"}, + [DRM_MEM_PAGES] = {"pagelist"}, + [DRM_MEM_FILES] = {"files"}, + [DRM_MEM_QUEUES] = {"queues"}, + [DRM_MEM_CMDS] = {"commands"}, + [DRM_MEM_MAPPINGS] = {"mappings"}, + [DRM_MEM_BUFLISTS] = {"buflists"}, + [DRM_MEM_AGPLISTS] = {"agplist"}, + [DRM_MEM_SGLISTS] = {"sglist"}, + [DRM_MEM_TOTALAGP] = {"totalagp"}, + [DRM_MEM_BOUNDAGP] = {"boundagp"}, + [DRM_MEM_CTXBITMAP] = {"ctxbitmap"}, + [DRM_MEM_CTXLIST] = {"ctxlist"}, + [DRM_MEM_STUB] = {"stub"}, + {NULL, 0,} /* Last entry must be null */ +}; + +void drm_mem_init(void) +{ + drm_mem_stats_t *mem; + struct sysinfo si; + + for (mem = drm_mem_stats; mem->name; ++mem) { + mem->succeed_count = 0; + mem->free_count = 0; + mem->fail_count = 0; + mem->bytes_allocated = 0; + mem->bytes_freed = 0; + } + + si_meminfo(&si); + drm_ram_available = si.totalram; + drm_ram_used = 0; +} + +/* drm_mem_info is called whenever a process reads /dev/drm/mem. 
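+ * (The entry is registered as "mem" under /proc/dri/<minor>/ by drm_proc.c,
+ * so with DEBUG_MEMORY defined the statistics below back e.g.
+ * "cat /proc/dri/0/mem"; minor 0 is just an example.)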
*/ + +static int drm__mem_info(char *buf, char **start, off_t offset, + int request, int *eof, void *data) +{ + drm_mem_stats_t *pt; + int len = 0; + + if (offset > DRM_PROC_LIMIT) { + *eof = 1; + return 0; + } + + *eof = 0; + *start = &buf[offset]; + + DRM_PROC_PRINT(" total counts " + " | outstanding \n"); + DRM_PROC_PRINT("type alloc freed fail bytes freed" + " | allocs bytes\n\n"); + DRM_PROC_PRINT("%-9.9s %5d %5d %4d %10lu kB |\n", + "system", 0, 0, 0, + drm_ram_available << (PAGE_SHIFT - 10)); + DRM_PROC_PRINT("%-9.9s %5d %5d %4d %10lu kB |\n", + "locked", 0, 0, 0, drm_ram_used >> 10); + DRM_PROC_PRINT("\n"); + for (pt = drm_mem_stats; pt->name; pt++) { + DRM_PROC_PRINT("%-9.9s %5d %5d %4d %10lu %10lu | %6d %10ld\n", + pt->name, + pt->succeed_count, + pt->free_count, + pt->fail_count, + pt->bytes_allocated, + pt->bytes_freed, + pt->succeed_count - pt->free_count, + (long)pt->bytes_allocated + - (long)pt->bytes_freed); + } + + if (len > request + offset) + return request; + *eof = 1; + return len - offset; +} + +int drm_mem_info(char *buf, char **start, off_t offset, + int len, int *eof, void *data) +{ + int ret; + + spin_lock(&drm_mem_lock); + ret = drm__mem_info(buf, start, offset, len, eof, data); + spin_unlock(&drm_mem_lock); + return ret; +} + +void *drm_alloc(size_t size, int area) +{ + void *pt; + + if (!size) { + DRM_MEM_ERROR(area, "Allocating 0 bytes\n"); + return NULL; + } + + if (!(pt = kmalloc(size, GFP_KERNEL))) { + spin_lock(&drm_mem_lock); + ++drm_mem_stats[area].fail_count; + spin_unlock(&drm_mem_lock); + return NULL; + } + spin_lock(&drm_mem_lock); + ++drm_mem_stats[area].succeed_count; + drm_mem_stats[area].bytes_allocated += size; + spin_unlock(&drm_mem_lock); + return pt; +} +EXPORT_SYMBOL(drm_alloc); + +void *drm_calloc(size_t nmemb, size_t size, int area) +{ + void *addr; + + addr = drm_alloc(nmemb * size, area); + if (addr != NULL) + memset((void *)addr, 0, size * nmemb); + + return addr; +} +EXPORT_SYMBOL(drm_calloc); + +void *drm_realloc(void *oldpt, size_t oldsize, size_t size, int area) +{ + void *pt; + + if (!(pt = drm_alloc(size, area))) + return NULL; + if (oldpt && oldsize) { + memcpy(pt, oldpt, oldsize); + drm_free(oldpt, oldsize, area); + } + return pt; +} +EXPORT_SYMBOL(drm_realloc); + +void drm_free(void *pt, size_t size, int area) +{ + int alloc_count; + int free_count; + + if (!pt) + DRM_MEM_ERROR(area, "Attempt to free NULL pointer\n"); + else + kfree(pt); + spin_lock(&drm_mem_lock); + drm_mem_stats[area].bytes_freed += size; + free_count = ++drm_mem_stats[area].free_count; + alloc_count = drm_mem_stats[area].succeed_count; + spin_unlock(&drm_mem_lock); + if (free_count > alloc_count) { + DRM_MEM_ERROR(area, "Excess frees: %d frees, %d allocs\n", + free_count, alloc_count); + } +} +EXPORT_SYMBOL(drm_free); + +unsigned long drm_alloc_pages(int order, int area) +{ + unsigned long address; + unsigned long bytes = PAGE_SIZE << order; + unsigned long addr; + unsigned int sz; + + spin_lock(&drm_mem_lock); + if ((drm_ram_used >> PAGE_SHIFT) + > (DRM_RAM_PERCENT * drm_ram_available) / 100) { + spin_unlock(&drm_mem_lock); + return 0; + } + spin_unlock(&drm_mem_lock); + + address = __get_free_pages(GFP_KERNEL, order); + if (!address) { + spin_lock(&drm_mem_lock); + ++drm_mem_stats[area].fail_count; + spin_unlock(&drm_mem_lock); + return 0; + } + spin_lock(&drm_mem_lock); + ++drm_mem_stats[area].succeed_count; + drm_mem_stats[area].bytes_allocated += bytes; + drm_ram_used += bytes; + spin_unlock(&drm_mem_lock); + + /* Zero outside the lock */ + memset((void 
*)address, 0, bytes); + + /* Reserve */ + for (addr = address, sz = bytes; + sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) { + SetPageReserved(virt_to_page(addr)); + } + + return address; +} + +void drm_free_pages(unsigned long address, int order, int area) +{ + unsigned long bytes = PAGE_SIZE << order; + int alloc_count; + int free_count; + unsigned long addr; + unsigned int sz; + + if (!address) { + DRM_MEM_ERROR(area, "Attempt to free address 0\n"); + } else { + /* Unreserve */ + for (addr = address, sz = bytes; + sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) { + ClearPageReserved(virt_to_page(addr)); + } + free_pages(address, order); + } + + spin_lock(&drm_mem_lock); + free_count = ++drm_mem_stats[area].free_count; + alloc_count = drm_mem_stats[area].succeed_count; + drm_mem_stats[area].bytes_freed += bytes; + drm_ram_used -= bytes; + spin_unlock(&drm_mem_lock); + if (free_count > alloc_count) { + DRM_MEM_ERROR(area, + "Excess frees: %d frees, %d allocs\n", + free_count, alloc_count); + } +} + +void *drm_ioremap(unsigned long offset, unsigned long size, drm_device_t * dev) +{ + void *pt; + + if (!size) { + DRM_MEM_ERROR(DRM_MEM_MAPPINGS, + "Mapping 0 bytes at 0x%08lx\n", offset); + return NULL; + } + + if (!(pt = drm_ioremap(offset, size, dev))) { + spin_lock(&drm_mem_lock); + ++drm_mem_stats[DRM_MEM_MAPPINGS].fail_count; + spin_unlock(&drm_mem_lock); + return NULL; + } + spin_lock(&drm_mem_lock); + ++drm_mem_stats[DRM_MEM_MAPPINGS].succeed_count; + drm_mem_stats[DRM_MEM_MAPPINGS].bytes_allocated += size; + spin_unlock(&drm_mem_lock); + return pt; +} +EXPORT_SYMBOL(drm_ioremap); + +void *drm_ioremap_nocache(unsigned long offset, unsigned long size, + drm_device_t * dev) +{ + void *pt; + + if (!size) { + DRM_MEM_ERROR(DRM_MEM_MAPPINGS, + "Mapping 0 bytes at 0x%08lx\n", offset); + return NULL; + } + + if (!(pt = drm_ioremap_nocache(offset, size, dev))) { + spin_lock(&drm_mem_lock); + ++drm_mem_stats[DRM_MEM_MAPPINGS].fail_count; + spin_unlock(&drm_mem_lock); + return NULL; + } + spin_lock(&drm_mem_lock); + ++drm_mem_stats[DRM_MEM_MAPPINGS].succeed_count; + drm_mem_stats[DRM_MEM_MAPPINGS].bytes_allocated += size; + spin_unlock(&drm_mem_lock); + return pt; +} +EXPORT_SYMBOL(drm_ioremap_nocache); + +void drm_ioremapfree(void *pt, unsigned long size, drm_device_t * dev) +{ + int alloc_count; + int free_count; + + if (!pt) + DRM_MEM_ERROR(DRM_MEM_MAPPINGS, + "Attempt to free NULL pointer\n"); + else + drm_ioremapfree(pt, size, dev); + + spin_lock(&drm_mem_lock); + drm_mem_stats[DRM_MEM_MAPPINGS].bytes_freed += size; + free_count = ++drm_mem_stats[DRM_MEM_MAPPINGS].free_count; + alloc_count = drm_mem_stats[DRM_MEM_MAPPINGS].succeed_count; + spin_unlock(&drm_mem_lock); + if (free_count > alloc_count) { + DRM_MEM_ERROR(DRM_MEM_MAPPINGS, + "Excess frees: %d frees, %d allocs\n", + free_count, alloc_count); + } +} +EXPORT_SYMBOL(drm_ioremapfree); + +#if __OS_HAS_AGP + +DRM_AGP_MEM *drm_alloc_agp(drm_device_t *dev, int pages, u32 type) +{ + DRM_AGP_MEM *handle; + + if (!pages) { + DRM_MEM_ERROR(DRM_MEM_TOTALAGP, "Allocating 0 pages\n"); + return NULL; + } + +#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,11) + if ((handle = drm_agp_allocate_memory(pages, type))) { +#else + if ((handle = drm_agp_allocate_memory(dev->agp->bridge, pages, type))) { +#endif + spin_lock(&drm_mem_lock); + ++drm_mem_stats[DRM_MEM_TOTALAGP].succeed_count; + drm_mem_stats[DRM_MEM_TOTALAGP].bytes_allocated + += pages << PAGE_SHIFT; + spin_unlock(&drm_mem_lock); + return handle; + } + spin_lock(&drm_mem_lock); + 
++drm_mem_stats[DRM_MEM_TOTALAGP].fail_count; + spin_unlock(&drm_mem_lock); + return NULL; +} + +int drm_free_agp(DRM_AGP_MEM * handle, int pages) +{ + int alloc_count; + int free_count; + int retval = -EINVAL; + + if (!handle) { + DRM_MEM_ERROR(DRM_MEM_TOTALAGP, + "Attempt to free NULL AGP handle\n"); + return retval; + } + + if (drm_agp_free_memory(handle)) { + spin_lock(&drm_mem_lock); + free_count = ++drm_mem_stats[DRM_MEM_TOTALAGP].free_count; + alloc_count = drm_mem_stats[DRM_MEM_TOTALAGP].succeed_count; + drm_mem_stats[DRM_MEM_TOTALAGP].bytes_freed + += pages << PAGE_SHIFT; + spin_unlock(&drm_mem_lock); + if (free_count > alloc_count) { + DRM_MEM_ERROR(DRM_MEM_TOTALAGP, + "Excess frees: %d frees, %d allocs\n", + free_count, alloc_count); + } + return 0; + } + return retval; +} + +int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start) +{ + int retcode = -EINVAL; + + if (!handle) { + DRM_MEM_ERROR(DRM_MEM_BOUNDAGP, + "Attempt to bind NULL AGP handle\n"); + return retcode; + } + + if (!(retcode = drm_agp_bind_memory(handle, start))) { + spin_lock(&drm_mem_lock); + ++drm_mem_stats[DRM_MEM_BOUNDAGP].succeed_count; + drm_mem_stats[DRM_MEM_BOUNDAGP].bytes_allocated + += handle->page_count << PAGE_SHIFT; + spin_unlock(&drm_mem_lock); + return retcode; + } + spin_lock(&drm_mem_lock); + ++drm_mem_stats[DRM_MEM_BOUNDAGP].fail_count; + spin_unlock(&drm_mem_lock); + return retcode; +} + +int drm_unbind_agp(DRM_AGP_MEM * handle) +{ + int alloc_count; + int free_count; + int retcode = -EINVAL; + + if (!handle) { + DRM_MEM_ERROR(DRM_MEM_BOUNDAGP, + "Attempt to unbind NULL AGP handle\n"); + return retcode; + } + + if ((retcode = drm_agp_unbind_memory(handle))) + return retcode; + spin_lock(&drm_mem_lock); + free_count = ++drm_mem_stats[DRM_MEM_BOUNDAGP].free_count; + alloc_count = drm_mem_stats[DRM_MEM_BOUNDAGP].succeed_count; + drm_mem_stats[DRM_MEM_BOUNDAGP].bytes_freed + += handle->page_count << PAGE_SHIFT; + spin_unlock(&drm_mem_lock); + if (free_count > alloc_count) { + DRM_MEM_ERROR(DRM_MEM_BOUNDAGP, + "Excess frees: %d frees, %d allocs\n", + free_count, alloc_count); + } + return retcode; +} + +#endif +#endif diff --git a/nx-X11/extras/drm/linux-core/drm_memory_debug.h b/nx-X11/extras/drm/linux-core/drm_memory_debug.h new file mode 100644 index 000000000..706b75251 --- /dev/null +++ b/nx-X11/extras/drm/linux-core/drm_memory_debug.h @@ -0,0 +1,448 @@ +/** + * \file drm_memory_debug.h + * Memory management wrappers for DRM. + * + * \author Rickard E. (Rik) Faith <faith@valinux.com> + * \author Gareth Hughes <gareth@valinux.com> + */ + +/* + * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. + * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#include <linux/config.h> +#include "drmP.h" + +typedef struct drm_mem_stats { + const char *name; + int succeed_count; + int free_count; + int fail_count; + unsigned long bytes_allocated; + unsigned long bytes_freed; +} drm_mem_stats_t; + +static spinlock_t drm_mem_lock = SPIN_LOCK_UNLOCKED; +static unsigned long drm_ram_available = 0; /* In pages */ +static unsigned long drm_ram_used = 0; +static drm_mem_stats_t drm_mem_stats[] = +{ + [DRM_MEM_DMA] = {"dmabufs"}, + [DRM_MEM_SAREA] = {"sareas"}, + [DRM_MEM_DRIVER] = {"driver"}, + [DRM_MEM_MAGIC] = {"magic"}, + [DRM_MEM_IOCTLS] = {"ioctltab"}, + [DRM_MEM_MAPS] = {"maplist"}, + [DRM_MEM_VMAS] = {"vmalist"}, + [DRM_MEM_BUFS] = {"buflist"}, + [DRM_MEM_SEGS] = {"seglist"}, + [DRM_MEM_PAGES] = {"pagelist"}, + [DRM_MEM_FILES] = {"files"}, + [DRM_MEM_QUEUES] = {"queues"}, + [DRM_MEM_CMDS] = {"commands"}, + [DRM_MEM_MAPPINGS] = {"mappings"}, + [DRM_MEM_BUFLISTS] = {"buflists"}, + [DRM_MEM_AGPLISTS] = {"agplist"}, + [DRM_MEM_SGLISTS] = {"sglist"}, + [DRM_MEM_TOTALAGP] = {"totalagp"}, + [DRM_MEM_BOUNDAGP] = {"boundagp"}, + [DRM_MEM_CTXBITMAP] = {"ctxbitmap"}, + [DRM_MEM_CTXLIST] = {"ctxlist"}, + [DRM_MEM_STUB] = {"stub"}, + {NULL, 0,} /* Last entry must be null */ +}; + +void drm_mem_init (void) { + drm_mem_stats_t *mem; + struct sysinfo si; + + for (mem = drm_mem_stats; mem->name; ++mem) { + mem->succeed_count = 0; + mem->free_count = 0; + mem->fail_count = 0; + mem->bytes_allocated = 0; + mem->bytes_freed = 0; + } + + si_meminfo(&si); + drm_ram_available = si.totalram; + drm_ram_used = 0; +} + +/* drm_mem_info is called whenever a process reads /dev/drm/mem. 
*/ + +static int drm__mem_info (char *buf, char **start, off_t offset, + int request, int *eof, void *data) { + drm_mem_stats_t *pt; + int len = 0; + + if (offset > DRM_PROC_LIMIT) { + *eof = 1; + return 0; + } + + *eof = 0; + *start = &buf[offset]; + + DRM_PROC_PRINT(" total counts " + " | outstanding \n"); + DRM_PROC_PRINT("type alloc freed fail bytes freed" + " | allocs bytes\n\n"); + DRM_PROC_PRINT("%-9.9s %5d %5d %4d %10lu kB |\n", + "system", 0, 0, 0, + drm_ram_available << (PAGE_SHIFT - 10)); + DRM_PROC_PRINT("%-9.9s %5d %5d %4d %10lu kB |\n", + "locked", 0, 0, 0, drm_ram_used >> 10); + DRM_PROC_PRINT("\n"); + for (pt = drm_mem_stats; pt->name; pt++) { + DRM_PROC_PRINT("%-9.9s %5d %5d %4d %10lu %10lu | %6d %10ld\n", + pt->name, + pt->succeed_count, + pt->free_count, + pt->fail_count, + pt->bytes_allocated, + pt->bytes_freed, + pt->succeed_count - pt->free_count, + (long)pt->bytes_allocated + - (long)pt->bytes_freed); + } + + if (len > request + offset) + return request; + *eof = 1; + return len - offset; +} + +int drm_mem_info (char *buf, char **start, off_t offset, + int len, int *eof, void *data) { + int ret; + + spin_lock(&drm_mem_lock); + ret = drm__mem_info (buf, start, offset, len, eof, data); + spin_unlock(&drm_mem_lock); + return ret; +} + +void *drm_alloc (size_t size, int area) { + void *pt; + + if (!size) { + DRM_MEM_ERROR(area, "Allocating 0 bytes\n"); + return NULL; + } + + if (!(pt = kmalloc(size, GFP_KERNEL))) { + spin_lock(&drm_mem_lock); + ++drm_mem_stats[area].fail_count; + spin_unlock(&drm_mem_lock); + return NULL; + } + spin_lock(&drm_mem_lock); + ++drm_mem_stats[area].succeed_count; + drm_mem_stats[area].bytes_allocated += size; + spin_unlock(&drm_mem_lock); + return pt; +} + +void *drm_calloc (size_t nmemb, size_t size, int area) { + void *addr; + + addr = drm_alloc (nmemb * size, area); + if (addr != NULL) + memset((void *)addr, 0, size * nmemb); + + return addr; +} + +void *drm_realloc (void *oldpt, size_t oldsize, size_t size, int area) { + void *pt; + + if (!(pt = drm_alloc (size, area))) + return NULL; + if (oldpt && oldsize) { + memcpy(pt, oldpt, oldsize); + drm_free (oldpt, oldsize, area); + } + return pt; +} + +void drm_free (void *pt, size_t size, int area) { + int alloc_count; + int free_count; + + if (!pt) + DRM_MEM_ERROR(area, "Attempt to free NULL pointer\n"); + else + kfree(pt); + spin_lock(&drm_mem_lock); + drm_mem_stats[area].bytes_freed += size; + free_count = ++drm_mem_stats[area].free_count; + alloc_count = drm_mem_stats[area].succeed_count; + spin_unlock(&drm_mem_lock); + if (free_count > alloc_count) { + DRM_MEM_ERROR(area, "Excess frees: %d frees, %d allocs\n", + free_count, alloc_count); + } +} + +unsigned long drm_alloc_pages (int order, int area) { + unsigned long address; + unsigned long bytes = PAGE_SIZE << order; + unsigned long addr; + unsigned int sz; + + spin_lock(&drm_mem_lock); + if ((drm_ram_used >> PAGE_SHIFT) + > (DRM_RAM_PERCENT * drm_ram_available) / 100) { + spin_unlock(&drm_mem_lock); + return 0; + } + spin_unlock(&drm_mem_lock); + + address = __get_free_pages(GFP_KERNEL, order); + if (!address) { + spin_lock(&drm_mem_lock); + ++drm_mem_stats[area].fail_count; + spin_unlock(&drm_mem_lock); + return 0; + } + spin_lock(&drm_mem_lock); + ++drm_mem_stats[area].succeed_count; + drm_mem_stats[area].bytes_allocated += bytes; + drm_ram_used += bytes; + spin_unlock(&drm_mem_lock); + + /* Zero outside the lock */ + memset((void *)address, 0, bytes); + + /* Reserve */ + for (addr = address, sz = bytes; + sz > 0; addr += PAGE_SIZE, 
sz -= PAGE_SIZE) { + SetPageReserved(virt_to_page(addr)); + } + + return address; +} + +void drm_free_pages (unsigned long address, int order, int area) { + unsigned long bytes = PAGE_SIZE << order; + int alloc_count; + int free_count; + unsigned long addr; + unsigned int sz; + + if (!address) { + DRM_MEM_ERROR(area, "Attempt to free address 0\n"); + } else { + /* Unreserve */ + for (addr = address, sz = bytes; + sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) { + ClearPageReserved(virt_to_page(addr)); + } + free_pages(address, order); + } + + spin_lock(&drm_mem_lock); + free_count = ++drm_mem_stats[area].free_count; + alloc_count = drm_mem_stats[area].succeed_count; + drm_mem_stats[area].bytes_freed += bytes; + drm_ram_used -= bytes; + spin_unlock(&drm_mem_lock); + if (free_count > alloc_count) { + DRM_MEM_ERROR(area, + "Excess frees: %d frees, %d allocs\n", + free_count, alloc_count); + } +} + +void *drm_ioremap (unsigned long offset, unsigned long size, + drm_device_t * dev) { + void *pt; + + if (!size) { + DRM_MEM_ERROR(DRM_MEM_MAPPINGS, + "Mapping 0 bytes at 0x%08lx\n", offset); + return NULL; + } + + if (!(pt = drm_ioremap(offset, size, dev))) { + spin_lock(&drm_mem_lock); + ++drm_mem_stats[DRM_MEM_MAPPINGS].fail_count; + spin_unlock(&drm_mem_lock); + return NULL; + } + spin_lock(&drm_mem_lock); + ++drm_mem_stats[DRM_MEM_MAPPINGS].succeed_count; + drm_mem_stats[DRM_MEM_MAPPINGS].bytes_allocated += size; + spin_unlock(&drm_mem_lock); + return pt; +} + +void *drm_ioremap_nocache (unsigned long offset, unsigned long size, + drm_device_t * dev) { + void *pt; + + if (!size) { + DRM_MEM_ERROR(DRM_MEM_MAPPINGS, + "Mapping 0 bytes at 0x%08lx\n", offset); + return NULL; + } + + if (!(pt = drm_ioremap_nocache(offset, size, dev))) { + spin_lock(&drm_mem_lock); + ++drm_mem_stats[DRM_MEM_MAPPINGS].fail_count; + spin_unlock(&drm_mem_lock); + return NULL; + } + spin_lock(&drm_mem_lock); + ++drm_mem_stats[DRM_MEM_MAPPINGS].succeed_count; + drm_mem_stats[DRM_MEM_MAPPINGS].bytes_allocated += size; + spin_unlock(&drm_mem_lock); + return pt; +} + +void drm_ioremapfree (void *pt, unsigned long size, drm_device_t * dev) { + int alloc_count; + int free_count; + + if (!pt) + DRM_MEM_ERROR(DRM_MEM_MAPPINGS, + "Attempt to free NULL pointer\n"); + else + drm_ioremapfree(pt, size, dev); + + spin_lock(&drm_mem_lock); + drm_mem_stats[DRM_MEM_MAPPINGS].bytes_freed += size; + free_count = ++drm_mem_stats[DRM_MEM_MAPPINGS].free_count; + alloc_count = drm_mem_stats[DRM_MEM_MAPPINGS].succeed_count; + spin_unlock(&drm_mem_lock); + if (free_count > alloc_count) { + DRM_MEM_ERROR(DRM_MEM_MAPPINGS, + "Excess frees: %d frees, %d allocs\n", + free_count, alloc_count); + } +} + +#if __OS_HAS_AGP + +DRM_AGP_MEM *drm_alloc_agp (drm_device_t *dev, int pages, u32 type) { + DRM_AGP_MEM *handle; + + if (!pages) { + DRM_MEM_ERROR(DRM_MEM_TOTALAGP, "Allocating 0 pages\n"); + return NULL; + } + + if ((handle = drm_agp_allocate_memory (pages, type))) { + spin_lock(&drm_mem_lock); + ++drm_mem_stats[DRM_MEM_TOTALAGP].succeed_count; + drm_mem_stats[DRM_MEM_TOTALAGP].bytes_allocated + += pages << PAGE_SHIFT; + spin_unlock(&drm_mem_lock); + return handle; + } + spin_lock(&drm_mem_lock); + ++drm_mem_stats[DRM_MEM_TOTALAGP].fail_count; + spin_unlock(&drm_mem_lock); + return NULL; +} + +int drm_free_agp (DRM_AGP_MEM * handle, int pages) { + int alloc_count; + int free_count; + int retval = -EINVAL; + + if (!handle) { + DRM_MEM_ERROR(DRM_MEM_TOTALAGP, + "Attempt to free NULL AGP handle\n"); + return retval; + } + + if (drm_agp_free_memory 
(handle)) { + spin_lock(&drm_mem_lock); + free_count = ++drm_mem_stats[DRM_MEM_TOTALAGP].free_count; + alloc_count = drm_mem_stats[DRM_MEM_TOTALAGP].succeed_count; + drm_mem_stats[DRM_MEM_TOTALAGP].bytes_freed + += pages << PAGE_SHIFT; + spin_unlock(&drm_mem_lock); + if (free_count > alloc_count) { + DRM_MEM_ERROR(DRM_MEM_TOTALAGP, + "Excess frees: %d frees, %d allocs\n", + free_count, alloc_count); + } + return 0; + } + return retval; +} + +int drm_bind_agp (DRM_AGP_MEM * handle, unsigned int start) { + int retcode = -EINVAL; + + if (!handle) { + DRM_MEM_ERROR(DRM_MEM_BOUNDAGP, + "Attempt to bind NULL AGP handle\n"); + return retcode; + } + + if (!(retcode = drm_agp_bind_memory (handle, start))) { + spin_lock(&drm_mem_lock); + ++drm_mem_stats[DRM_MEM_BOUNDAGP].succeed_count; + drm_mem_stats[DRM_MEM_BOUNDAGP].bytes_allocated + += handle->page_count << PAGE_SHIFT; + spin_unlock(&drm_mem_lock); + return retcode; + } + spin_lock(&drm_mem_lock); + ++drm_mem_stats[DRM_MEM_BOUNDAGP].fail_count; + spin_unlock(&drm_mem_lock); + return retcode; +} + +int drm_unbind_agp (DRM_AGP_MEM * handle) { + int alloc_count; + int free_count; + int retcode = -EINVAL; + + if (!handle) { + DRM_MEM_ERROR(DRM_MEM_BOUNDAGP, + "Attempt to unbind NULL AGP handle\n"); + return retcode; + } + + if ((retcode = drm_agp_unbind_memory (handle))) + return retcode; + spin_lock(&drm_mem_lock); + free_count = ++drm_mem_stats[DRM_MEM_BOUNDAGP].free_count; + alloc_count = drm_mem_stats[DRM_MEM_BOUNDAGP].succeed_count; + drm_mem_stats[DRM_MEM_BOUNDAGP].bytes_freed + += handle->page_count << PAGE_SHIFT; + spin_unlock(&drm_mem_lock); + if (free_count > alloc_count) { + DRM_MEM_ERROR(DRM_MEM_BOUNDAGP, + "Excess frees: %d frees, %d allocs\n", + free_count, alloc_count); + } + return retcode; +} +#endif diff --git a/nx-X11/extras/drm/linux-core/drm_os_linux.h b/nx-X11/extras/drm/linux-core/drm_os_linux.h new file mode 100644 index 000000000..d3fb67e60 --- /dev/null +++ b/nx-X11/extras/drm/linux-core/drm_os_linux.h @@ -0,0 +1,172 @@ +/** + * \file drm_os_linux.h + * OS abstraction macros. 
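+ *
+ * These macros exist so the OS-independent DRM code can be shared across
+ * kernels and operating systems. An illustrative ioctl handler written only
+ * against the abstractions below ("foo" is a hypothetical driver):
+ *
+ * \code
+ * int foo_dummy_ioctl(DRM_IOCTL_ARGS)
+ * {
+ *	DRM_DEVICE;			// declares priv and dev from filp
+ *	drm_control_t ctl;
+ *
+ *	// copies from user space, returns -EFAULT on a bad pointer
+ *	DRM_COPY_FROM_USER_IOCTL(ctl, (drm_control_t __user *) data,
+ *				 sizeof(ctl));
+ *	DRM_DEBUG("pid=%d func=%d\n", DRM_CURRENTPID, ctl.func);
+ *	return 0;
+ * }
+ * \endcode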
+ */ + +#include <linux/interrupt.h> /* For task queue support */ +#include <linux/delay.h> + +/** File pointer type */ +#define DRMFILE struct file * +/** Ioctl arguments */ +#define DRM_IOCTL_ARGS struct inode *inode, struct file *filp, unsigned int cmd, unsigned long data +#define DRM_ERR(d) -(d) +/** Current process ID */ +#define DRM_CURRENTPID current->pid +#define DRM_SUSER(p) capable(CAP_SYS_ADMIN) +#define DRM_UDELAY(d) udelay(d) +#if LINUX_VERSION_CODE <= 0x020608 /* KERNEL_VERSION(2,6,8) */ +/** Read a byte from a MMIO region */ +#define DRM_READ8(map, offset) readb(((unsigned long)(map)->handle) + (offset)) +/** Read a word from a MMIO region */ +#define DRM_READ16(map, offset) readw(((unsigned long)(map)->handle) + (offset)) +/** Read a dword from a MMIO region */ +#define DRM_READ32(map, offset) readl(((unsigned long)(map)->handle) + (offset)) +/** Write a byte into a MMIO region */ +#define DRM_WRITE8(map, offset, val) writeb(val, ((unsigned long)(map)->handle) + (offset)) +/** Write a word into a MMIO region */ +#define DRM_WRITE16(map, offset, val) writew(val, ((unsigned long)(map)->handle) + (offset)) +/** Write a dword into a MMIO region */ +#define DRM_WRITE32(map, offset, val) writel(val, ((unsigned long)(map)->handle) + (offset)) +#else +/** Read a byte from a MMIO region */ +#define DRM_READ8(map, offset) readb((map)->handle + (offset)) +/** Read a word from a MMIO region */ +#define DRM_READ16(map, offset) readw((map)->handle + (offset)) +/** Read a dword from a MMIO region */ +#define DRM_READ32(map, offset) readl((map)->handle + (offset)) +/** Write a byte into a MMIO region */ +#define DRM_WRITE8(map, offset, val) writeb(val, (map)->handle + (offset)) +/** Write a word into a MMIO region */ +#define DRM_WRITE16(map, offset, val) writew(val, (map)->handle + (offset)) +/** Write a dword into a MMIO region */ +#define DRM_WRITE32(map, offset, val) writel(val, (map)->handle + (offset)) +#endif +/** Read memory barrier */ +#define DRM_READMEMORYBARRIER() rmb() +/** Write memory barrier */ +#define DRM_WRITEMEMORYBARRIER() wmb() +/** Read/write memory barrier */ +#define DRM_MEMORYBARRIER() mb() +/** DRM device local declaration */ +#define DRM_DEVICE drm_file_t *priv = filp->private_data; \ + drm_device_t *dev = priv->head->dev + +/** IRQ handler arguments and return type and values */ +#define DRM_IRQ_ARGS int irq, void *arg, struct pt_regs *regs +/** backwards compatibility with old irq return values */ +#ifndef IRQ_HANDLED +typedef void irqreturn_t; +#define IRQ_HANDLED /* nothing */ +#define IRQ_NONE /* nothing */ +#endif + +/** AGP types */ +#if __OS_HAS_AGP +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,70) +#define DRM_AGP_MEM agp_memory +#define DRM_AGP_KERN agp_kern_info +#else +#define DRM_AGP_MEM struct agp_memory +#define DRM_AGP_KERN struct agp_kern_info +#endif +#else +/* define some dummy types for non AGP supporting kernels */ +struct no_agp_kern { + unsigned long aper_base; + unsigned long aper_size; +}; +#define DRM_AGP_MEM int +#define DRM_AGP_KERN struct no_agp_kern +#endif + +#if !(__OS_HAS_MTRR) +static __inline__ int mtrr_add(unsigned long base, unsigned long size, + unsigned int type, char increment) +{ + return -ENODEV; +} + +static __inline__ int mtrr_del(int reg, unsigned long base, unsigned long size) +{ + return -ENODEV; +} + +#define MTRR_TYPE_WRCOMB 1 +#endif + +/** Task queue handler arguments */ +#define DRM_TASKQUEUE_ARGS void *arg + +/** For data going into the kernel through the ioctl argument */ +#define 
DRM_COPY_FROM_USER_IOCTL(arg1, arg2, arg3)	\
+	if ( copy_from_user(&arg1, arg2, arg3) )	\
+		return -EFAULT
+/** For data going from the kernel through the ioctl argument */
+#define DRM_COPY_TO_USER_IOCTL(arg1, arg2, arg3)	\
+	if ( copy_to_user(arg1, &arg2, arg3) )		\
+		return -EFAULT
+/** Other copying of data to kernel space */
+#define DRM_COPY_FROM_USER(arg1, arg2, arg3)		\
+	copy_from_user(arg1, arg2, arg3)
+/** Other copying of data from kernel space */
+#define DRM_COPY_TO_USER(arg1, arg2, arg3)		\
+	copy_to_user(arg1, arg2, arg3)
+/* Macros for copyfrom user, but checking readability only once */
+#define DRM_VERIFYAREA_READ( uaddr, size )		\
+	(access_ok( VERIFY_READ, uaddr, size) ? 0 : -EFAULT)
+#define DRM_COPY_FROM_USER_UNCHECKED(arg1, arg2, arg3)	\
+	__copy_from_user(arg1, arg2, arg3)
+#define DRM_COPY_TO_USER_UNCHECKED(arg1, arg2, arg3)	\
+	__copy_to_user(arg1, arg2, arg3)
+#define DRM_GET_USER_UNCHECKED(val, uaddr)		\
+	__get_user(val, uaddr)
+
+#define DRM_GET_PRIV_WITH_RETURN(_priv, _filp) _priv = _filp->private_data
+
+/**
+ * Get the pointer to the SAREA.
+ *
+ * Searches the SAREA on the mapping lists and points drm_device::sarea to it.
+ */
+#define DRM_GETSAREA()							\
+do {									\
+	drm_map_list_t *entry;						\
+	list_for_each_entry( entry, &dev->maplist->head, head ) {	\
+		if ( entry->map &&					\
+		     entry->map->type == _DRM_SHM &&			\
+		     (entry->map->flags & _DRM_CONTAINS_LOCK) ) {	\
+			dev_priv->sarea = entry->map;			\
+			break;						\
+		}							\
+	}								\
+} while (0)
+
+#define DRM_HZ HZ
+
+#define DRM_WAIT_ON( ret, queue, timeout, condition )		\
+do {								\
+	DECLARE_WAITQUEUE(entry, current);			\
+	unsigned long end = jiffies + (timeout);		\
+	add_wait_queue(&(queue), &entry);			\
+								\
+	for (;;) {						\
+		__set_current_state(TASK_INTERRUPTIBLE);	\
+		if (condition)					\
+			break;					\
+		if (time_after_eq(jiffies, end)) {		\
+			ret = -EBUSY;				\
+			break;					\
+		}						\
+		schedule_timeout((HZ/100 > 1) ? HZ/100 : 1);	\
+		if (signal_pending(current)) {			\
+			ret = -EINTR;				\
+			break;					\
+		}						\
+	}							\
+	__set_current_state(TASK_RUNNING);			\
+	remove_wait_queue(&(queue), &entry);			\
+} while (0)
+
+#define DRM_WAKEUP( queue ) wake_up_interruptible( queue )
+#define DRM_INIT_WAITQUEUE( queue ) init_waitqueue_head( queue )
diff --git a/nx-X11/extras/drm/linux-core/drm_pci.c b/nx-X11/extras/drm/linux-core/drm_pci.c
new file mode 100644
index 000000000..dd9b0c8c2
--- /dev/null
+++ b/nx-X11/extras/drm/linux-core/drm_pci.c
@@ -0,0 +1,186 @@
+/* drm_pci.h -- PCI DMA memory management wrappers for DRM -*- linux-c -*- */
+/**
+ * \file drm_pci.c
+ * \brief Functions and ioctls to manage PCI memory
+ *
+ * \warning These interfaces aren't stable yet.
+ *
+ * \todo Implement the remaining ioctls for the PCI pools.
+ * \todo The wrappers here are so thin that they would be better off inlined.
+ *
+ * \author Jose Fonseca <jrfonseca@tungstengraphics.com>
+ * \author Leif Delgass <ldelgass@retinalburn.net>
+ */
+
+/*
+ * Copyright 2003 José Fonseca.
+ * Copyright 2003 Leif Delgass.
+ * All Rights Reserved.
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ + +#include <linux/pci.h> +#include "drmP.h" + +/**********************************************************************/ +/** \name PCI memory */ +/*@{*/ + +/** + * \brief Allocate a PCI consistent memory block, for DMA. + */ +drm_dma_handle_t *drm_pci_alloc(drm_device_t * dev, size_t size, size_t align, + dma_addr_t maxaddr) +{ + drm_dma_handle_t *dmah; +#if 0 + unsigned long addr; + size_t sz; +#endif +#if DRM_DEBUG_MEMORY + int area = DRM_MEM_DMA; + + spin_lock(&drm_mem_lock); + if ((drm_ram_used >> PAGE_SHIFT) + > (DRM_RAM_PERCENT * drm_ram_available) / 100) { + spin_unlock(&drm_mem_lock); + return 0; + } + spin_unlock(&drm_mem_lock); +#endif + + /* pci_alloc_consistent only guarantees alignment to the smallest + * PAGE_SIZE order which is greater than or equal to the requested size. + * Return NULL here for now to make sure nobody tries for larger alignment + */ + if (align > size) + return NULL; + + if (pci_set_dma_mask(dev->pdev, maxaddr) != 0) { + DRM_ERROR("Setting pci dma mask failed\n"); + return NULL; + } + + dmah = kmalloc(sizeof(drm_dma_handle_t), GFP_KERNEL); + if (!dmah) + return NULL; + + dmah->size = size; + dmah->vaddr = pci_alloc_consistent(dev->pdev, size, &dmah->busaddr); + +#if DRM_DEBUG_MEMORY + if (dmah->vaddr == NULL) { + spin_lock(&drm_mem_lock); + ++drm_mem_stats[area].fail_count; + spin_unlock(&drm_mem_lock); + kfree(dmah); + return NULL; + } + + spin_lock(&drm_mem_lock); + ++drm_mem_stats[area].succeed_count; + drm_mem_stats[area].bytes_allocated += size; + drm_ram_used += size; + spin_unlock(&drm_mem_lock); +#else + if (dmah->vaddr == NULL) { + kfree(dmah); + return NULL; + } +#endif + + memset(dmah->vaddr, 0, size); + +#if 0 + /* XXX - Is virt_to_page() legal for consistent mem? */ + /* Reserve */ + for (addr = (unsigned long)dmah->vaddr, sz = size; + sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) { + SetPageReserved(virt_to_page(addr)); + } +#endif + + return dmah; +} +EXPORT_SYMBOL(drm_pci_alloc); + +/** + * \brief Free a PCI consistent memory block without freeing its descriptor. + * + * This function is for internal use in the Linux-specific DRM core code. 
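+ *
+ * Drivers normally use the drm_pci_alloc()/drm_pci_free() pair instead,
+ * which also allocates and releases the handle. A sketch, assuming a
+ * hypothetical 64 kB buffer, 16-byte aligned, that must stay below 4 GB:
+ *
+ * \code
+ * drm_dma_handle_t *dmah = drm_pci_alloc(dev, 0x10000, 0x10, 0xffffffff);
+ * if (!dmah)
+ *	return -ENOMEM;
+ * // ... dmah->vaddr is the CPU view, dmah->busaddr goes to the device ...
+ * drm_pci_free(dev, dmah);
+ * \endcode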
+ */ +void +__drm_pci_free(drm_device_t * dev, drm_dma_handle_t *dmah) +{ +#if 0 + unsigned long addr; + size_t sz; +#endif +#if DRM_DEBUG_MEMORY + int area = DRM_MEM_DMA; + int alloc_count; + int free_count; +#endif + + if (!dmah->vaddr) { +#if DRM_DEBUG_MEMORY + DRM_MEM_ERROR(area, "Attempt to free address 0\n"); +#endif + } else { +#if 0 + /* XXX - Is virt_to_page() legal for consistent mem? */ + /* Unreserve */ + for (addr = (unsigned long)dmah->vaddr, sz = size; + sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) { + ClearPageReserved(virt_to_page(addr)); + } +#endif + pci_free_consistent(dev->pdev, dmah->size, dmah->vaddr, + dmah->busaddr); + } + +#if DRM_DEBUG_MEMORY + spin_lock(&drm_mem_lock); + free_count = ++drm_mem_stats[area].free_count; + alloc_count = drm_mem_stats[area].succeed_count; + drm_mem_stats[area].bytes_freed += size; + drm_ram_used -= size; + spin_unlock(&drm_mem_lock); + if (free_count > alloc_count) { + DRM_MEM_ERROR(area, + "Excess frees: %d frees, %d allocs\n", + free_count, alloc_count); + } +#endif + +} + +/** + * \brief Free a PCI consistent memory block. + */ +void +drm_pci_free(drm_device_t * dev, drm_dma_handle_t *dmah) +{ + __drm_pci_free(dev, dmah); + kfree(dmah); +} +EXPORT_SYMBOL(drm_pci_free); + +/*@}*/ diff --git a/nx-X11/extras/drm/linux-core/drm_proc.c b/nx-X11/extras/drm/linux-core/drm_proc.c new file mode 100644 index 000000000..3e2b3ba08 --- /dev/null +++ b/nx-X11/extras/drm/linux-core/drm_proc.c @@ -0,0 +1,550 @@ +/** + * \file drm_proc.c + * /proc support for DRM + * + * \author Rickard E. (Rik) Faith <faith@valinux.com> + * \author Gareth Hughes <gareth@valinux.com> + * + * \par Acknowledgements: + * Matthew J Sottek <matthew.j.sottek@intel.com> sent in a patch to fix + * the problem with the proc files not outputting all their information. + */ + +/* + * Created: Mon Jan 11 09:48:47 1999 by faith@valinux.com + * + * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. + * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ */
+
+#include "drmP.h"
+
+static int drm_name_info(char *buf, char **start, off_t offset,
+			 int request, int *eof, void *data);
+static int drm_vm_info(char *buf, char **start, off_t offset,
+		       int request, int *eof, void *data);
+static int drm_clients_info(char *buf, char **start, off_t offset,
+			    int request, int *eof, void *data);
+static int drm_queues_info(char *buf, char **start, off_t offset,
+			   int request, int *eof, void *data);
+static int drm_bufs_info(char *buf, char **start, off_t offset,
+			 int request, int *eof, void *data);
+#if DRM_DEBUG_CODE
+static int drm_vma_info(char *buf, char **start, off_t offset,
+			int request, int *eof, void *data);
+#endif
+
+/**
+ * Proc file list.
+ */
+static struct drm_proc_list {
+	const char *name;	/**< file name */
+	int (*f) (char *, char **, off_t, int, int *, void *);	/**< proc callback*/
+} drm_proc_list[] = {
+	{"name", drm_name_info},
+	{"mem", drm_mem_info},
+	{"vm", drm_vm_info},
+	{"clients", drm_clients_info},
+	{"queues", drm_queues_info},
+	{"bufs", drm_bufs_info},
+#if DRM_DEBUG_CODE
+	{"vma", drm_vma_info},
+#endif
+};
+
+#define DRM_PROC_ENTRIES (sizeof(drm_proc_list)/sizeof(drm_proc_list[0]))
+
+/**
+ * Initialize the DRI proc filesystem for a device.
+ *
+ * \param dev DRM device.
+ * \param minor device minor number.
+ * \param root DRI proc dir entry.
+ * \param dev_root resulting DRI device proc dir entry.
+ * \return zero on success, or a negative number on failure.
+ *
+ * Create the DRI proc root entry "/proc/dri", the device proc root entry
+ * "/proc/dri/%minor%/", and each entry in proc_list as
+ * "/proc/dri/%minor%/%name%".
+ */
+int drm_proc_init(drm_device_t * dev, int minor,
+		  struct proc_dir_entry *root, struct proc_dir_entry **dev_root)
+{
+	struct proc_dir_entry *ent;
+	int i, j;
+	char name[64];
+
+	sprintf(name, "%d", minor);
+	*dev_root = create_proc_entry(name, S_IFDIR, root);
+	if (!*dev_root) {
+		DRM_ERROR("Cannot create /proc/dri/%s\n", name);
+		return -1;
+	}
+
+	for (i = 0; i < DRM_PROC_ENTRIES; i++) {
+		ent = create_proc_entry(drm_proc_list[i].name,
+					S_IFREG | S_IRUGO, *dev_root);
+		if (!ent) {
+			DRM_ERROR("Cannot create /proc/dri/%s/%s\n",
+				  name, drm_proc_list[i].name);
+			/* Remove only the entries created so far; index
+			 * with j, not the failed entry i. */
+			for (j = 0; j < i; j++)
+				remove_proc_entry(drm_proc_list[j].name,
						  *dev_root);
+			remove_proc_entry(name, root);
+			return -1;
+		}
+		ent->read_proc = drm_proc_list[i].f;
+		ent->data = dev;
+	}
+	return 0;
+}
+
+/**
+ * Cleanup the proc filesystem resources.
+ *
+ * \param minor device minor number.
+ * \param root DRI proc dir entry.
+ * \param dev_root DRI device proc dir entry.
+ * \return always zero.
+ *
+ * Remove all proc entries created by proc_init().
+ */
+int drm_proc_cleanup(int minor, struct proc_dir_entry *root,
+		     struct proc_dir_entry *dev_root)
+{
+	int i;
+	char name[64];
+
+	if (!root || !dev_root)
+		return 0;
+
+	for (i = 0; i < DRM_PROC_ENTRIES; i++)
+		remove_proc_entry(drm_proc_list[i].name, dev_root);
+	sprintf(name, "%d", minor);
+	remove_proc_entry(name, root);
+
+	return 0;
+}
+
+/**
+ * Called when "/proc/dri/.../name" is read.
+ *
+ * \param buf output buffer.
+ * \param start start of output data.
+ * \param offset requested start offset.
+ * \param request requested number of bytes.
+ * \param eof whether there is no more data to return.
+ * \param data private data.
+ * \return number of written bytes.
+ *
+ * Prints the device name together with the bus id if available.
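+ * Like the other read_proc handlers in this file, it bails out early once
+ * the requested offset passes DRM_PROC_LIMIT and emits its text through
+ * the DRM_PROC_PRINT() helpers.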
+ */ +static int drm_name_info(char *buf, char **start, off_t offset, int request, + int *eof, void *data) +{ + drm_device_t *dev = (drm_device_t *) data; + int len = 0; + + if (offset > DRM_PROC_LIMIT) { + *eof = 1; + return 0; + } + + *start = &buf[offset]; + *eof = 0; + + if (dev->unique) { + DRM_PROC_PRINT("%s %s %s\n", + dev->driver->pci_driver.name, pci_name(dev->pdev), + dev->unique); + } else { + DRM_PROC_PRINT("%s %s\n", dev->driver->pci_driver.name, + pci_name(dev->pdev)); + } + + if (len > request + offset) + return request; + *eof = 1; + return len - offset; +} + +/** + * Called when "/proc/dri/.../vm" is read. + * + * \param buf output buffer. + * \param start start of output data. + * \param offset requested start offset. + * \param request requested number of bytes. + * \param eof whether there is no more data to return. + * \param data private data. + * \return number of written bytes. + * + * Prints information about all mappings in drm_device::maplist. + */ +static int drm__vm_info(char *buf, char **start, off_t offset, int request, + int *eof, void *data) +{ + drm_device_t *dev = (drm_device_t *) data; + int len = 0; + drm_map_t *map; + drm_map_list_t *r_list; + struct list_head *list; + + /* Hardcoded from _DRM_FRAME_BUFFER, + _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, + _DRM_SCATTER_GATHER, and _DRM_CONSISTENT. */ + const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" }; + const char *type; + int i; + + if (offset > DRM_PROC_LIMIT) { + *eof = 1; + return 0; + } + + *start = &buf[offset]; + *eof = 0; + + DRM_PROC_PRINT("slot offset size type flags " + "address mtrr\n\n"); + i = 0; + if (dev->maplist != NULL) + list_for_each(list, &dev->maplist->head) { + r_list = list_entry(list, drm_map_list_t, head); + map = r_list->map; + if (!map) + continue; + if (map->type < 0 || map->type > 5) + type = "??"; + else + type = types[map->type]; + DRM_PROC_PRINT("%4d 0x%08lx 0x%08lx %4.4s 0x%02x 0x%08x ", + i, + map->offset, + map->size, + type, map->flags, + r_list->user_token); + + if (map->mtrr < 0) { + DRM_PROC_PRINT("none\n"); + } else { + DRM_PROC_PRINT("%4d\n", map->mtrr); + } + i++; + } + + if (len > request + offset) + return request; + *eof = 1; + return len - offset; +} + +/** + * Simply calls _vm_info() while holding the drm_device::struct_sem lock. + */ +static int drm_vm_info(char *buf, char **start, off_t offset, int request, + int *eof, void *data) +{ + drm_device_t *dev = (drm_device_t *) data; + int ret; + + down(&dev->struct_sem); + ret = drm__vm_info(buf, start, offset, request, eof, data); + up(&dev->struct_sem); + return ret; +} + +/** + * Called when "/proc/dri/.../queues" is read. + * + * \param buf output buffer. + * \param start start of output data. + * \param offset requested start offset. + * \param request requested number of bytes. + * \param eof whether there is no more data to return. + * \param data private data. + * \return number of written bytes. 
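+ * A temporary reference is taken on each queue (drm_queue_t::use_count)
+ * while its line is printed, so the queue cannot vanish mid-print.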
+ */ +static int drm__queues_info(char *buf, char **start, off_t offset, + int request, int *eof, void *data) +{ + drm_device_t *dev = (drm_device_t *) data; + int len = 0; + int i; + drm_queue_t *q; + + if (offset > DRM_PROC_LIMIT) { + *eof = 1; + return 0; + } + + *start = &buf[offset]; + *eof = 0; + + DRM_PROC_PRINT(" ctx/flags use fin" + " blk/rw/rwf wait flushed queued" + " locks\n\n"); + for (i = 0; i < dev->queue_count; i++) { + q = dev->queuelist[i]; + atomic_inc(&q->use_count); + DRM_PROC_PRINT_RET(atomic_dec(&q->use_count), + "%5d/0x%03x %5d %5d" + " %5d/%c%c/%c%c%c %5Zd\n", + i, + q->flags, + atomic_read(&q->use_count), + atomic_read(&q->finalization), + atomic_read(&q->block_count), + atomic_read(&q->block_read) ? 'r' : '-', + atomic_read(&q->block_write) ? 'w' : '-', + waitqueue_active(&q->read_queue) ? 'r' : '-', + waitqueue_active(&q-> + write_queue) ? 'w' : '-', + waitqueue_active(&q-> + flush_queue) ? 'f' : '-', + DRM_BUFCOUNT(&q->waitlist)); + atomic_dec(&q->use_count); + } + + if (len > request + offset) + return request; + *eof = 1; + return len - offset; +} + +/** + * Simply calls _queues_info() while holding the drm_device::struct_sem lock. + */ +static int drm_queues_info(char *buf, char **start, off_t offset, int request, + int *eof, void *data) +{ + drm_device_t *dev = (drm_device_t *) data; + int ret; + + down(&dev->struct_sem); + ret = drm__queues_info(buf, start, offset, request, eof, data); + up(&dev->struct_sem); + return ret; +} + +/** + * Called when "/proc/dri/.../bufs" is read. + * + * \param buf output buffer. + * \param start start of output data. + * \param offset requested start offset. + * \param request requested number of bytes. + * \param eof whether there is no more data to return. + * \param data private data. + * \return number of written bytes. + */ +static int drm__bufs_info(char *buf, char **start, off_t offset, int request, + int *eof, void *data) +{ + drm_device_t *dev = (drm_device_t *) data; + int len = 0; + drm_device_dma_t *dma = dev->dma; + int i; + + if (!dma || offset > DRM_PROC_LIMIT) { + *eof = 1; + return 0; + } + + *start = &buf[offset]; + *eof = 0; + + DRM_PROC_PRINT(" o size count free segs pages kB\n\n"); + for (i = 0; i <= DRM_MAX_ORDER; i++) { + if (dma->bufs[i].buf_count) + DRM_PROC_PRINT("%2d %8d %5d %5d %5d %5d %5ld\n", + i, + dma->bufs[i].buf_size, + dma->bufs[i].buf_count, + atomic_read(&dma->bufs[i] + .freelist.count), + dma->bufs[i].seg_count, + dma->bufs[i].seg_count + * (1 << dma->bufs[i].page_order), + (dma->bufs[i].seg_count + * (1 << dma->bufs[i].page_order)) + * PAGE_SIZE / 1024); + } + DRM_PROC_PRINT("\n"); + for (i = 0; i < dma->buf_count; i++) { + if (i && !(i % 32)) + DRM_PROC_PRINT("\n"); + DRM_PROC_PRINT(" %d", dma->buflist[i]->list); + } + DRM_PROC_PRINT("\n"); + + if (len > request + offset) + return request; + *eof = 1; + return len - offset; +} + +/** + * Simply calls _bufs_info() while holding the drm_device::struct_sem lock. + */ +static int drm_bufs_info(char *buf, char **start, off_t offset, int request, + int *eof, void *data) +{ + drm_device_t *dev = (drm_device_t *) data; + int ret; + + down(&dev->struct_sem); + ret = drm__bufs_info(buf, start, offset, request, eof, data); + up(&dev->struct_sem); + return ret; +} + +/** + * Called when "/proc/dri/.../clients" is read. + * + * \param buf output buffer. + * \param start start of output data. + * \param offset requested start offset. + * \param request requested number of bytes. + * \param eof whether there is no more data to return. 
+ * \param data private data. + * \return number of written bytes. + */ +static int drm__clients_info(char *buf, char **start, off_t offset, + int request, int *eof, void *data) +{ + drm_device_t *dev = (drm_device_t *) data; + int len = 0; + drm_file_t *priv; + + if (offset > DRM_PROC_LIMIT) { + *eof = 1; + return 0; + } + + *start = &buf[offset]; + *eof = 0; + + DRM_PROC_PRINT("a dev pid uid magic ioctls\n\n"); + for (priv = dev->file_first; priv; priv = priv->next) { + DRM_PROC_PRINT("%c %3d %5d %5d %10u %10lu\n", + priv->authenticated ? 'y' : 'n', + priv->minor, + priv->pid, + priv->uid, priv->magic, priv->ioctl_count); + } + + if (len > request + offset) + return request; + *eof = 1; + return len - offset; +} + +/** + * Simply calls _clients_info() while holding the drm_device::struct_sem lock. + */ +static int drm_clients_info(char *buf, char **start, off_t offset, + int request, int *eof, void *data) +{ + drm_device_t *dev = (drm_device_t *) data; + int ret; + + down(&dev->struct_sem); + ret = drm__clients_info(buf, start, offset, request, eof, data); + up(&dev->struct_sem); + return ret; +} + +#if DRM_DEBUG_CODE + +static int drm__vma_info(char *buf, char **start, off_t offset, int request, + int *eof, void *data) +{ + drm_device_t *dev = (drm_device_t *) data; + int len = 0; + drm_vma_entry_t *pt; + struct vm_area_struct *vma; +#if defined(__i386__) + unsigned int pgprot; +#endif + + if (offset > DRM_PROC_LIMIT) { + *eof = 1; + return 0; + } + + *start = &buf[offset]; + *eof = 0; + + DRM_PROC_PRINT("vma use count: %d, high_memory = %p, 0x%08lx\n", + atomic_read(&dev->vma_count), + high_memory, virt_to_phys(high_memory)); + for (pt = dev->vmalist; pt; pt = pt->next) { + if (!(vma = pt->vma)) + continue; + DRM_PROC_PRINT("\n%5d 0x%08lx-0x%08lx %c%c%c%c%c%c 0x%08lx", + pt->pid, + vma->vm_start, + vma->vm_end, + vma->vm_flags & VM_READ ? 'r' : '-', + vma->vm_flags & VM_WRITE ? 'w' : '-', + vma->vm_flags & VM_EXEC ? 'x' : '-', + vma->vm_flags & VM_MAYSHARE ? 's' : 'p', + vma->vm_flags & VM_LOCKED ? 'l' : '-', + vma->vm_flags & VM_IO ? 'i' : '-', + VM_OFFSET(vma)); + +#if defined(__i386__) + pgprot = pgprot_val(vma->vm_page_prot); + DRM_PROC_PRINT(" %c%c%c%c%c%c%c%c%c", + pgprot & _PAGE_PRESENT ? 'p' : '-', + pgprot & _PAGE_RW ? 'w' : 'r', + pgprot & _PAGE_USER ? 'u' : 's', + pgprot & _PAGE_PWT ? 't' : 'b', + pgprot & _PAGE_PCD ? 'u' : 'c', + pgprot & _PAGE_ACCESSED ? 'a' : '-', + pgprot & _PAGE_DIRTY ? 'd' : '-', + pgprot & _PAGE_PSE ? 'm' : 'k', + pgprot & _PAGE_GLOBAL ? 'g' : 'l'); +#endif + DRM_PROC_PRINT("\n"); + } + + if (len > request + offset) + return request; + *eof = 1; + return len - offset; +} + +static int drm_vma_info(char *buf, char **start, off_t offset, int request, + int *eof, void *data) +{ + drm_device_t *dev = (drm_device_t *) data; + int ret; + + down(&dev->struct_sem); + ret = drm__vma_info(buf, start, offset, request, eof, data); + up(&dev->struct_sem); + return ret; +} +#endif diff --git a/nx-X11/extras/drm/linux-core/drm_scatter.c b/nx-X11/extras/drm/linux-core/drm_scatter.c new file mode 100644 index 000000000..1647303dd --- /dev/null +++ b/nx-X11/extras/drm/linux-core/drm_scatter.c @@ -0,0 +1,230 @@ +/** + * \file drm_scatter.c + * IOCTLs to manage scatter/gather memory + * + * \author Gareth Hughes <gareth@valinux.com> + */ + +/* + * Created: Mon Dec 18 23:20:54 2000 by gareth@valinux.com + * + * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. + * All Rights Reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include <linux/config.h> +#include <linux/vmalloc.h> + +#include "drmP.h" + +#define DEBUG_SCATTER 0 + +void drm_sg_cleanup(drm_sg_mem_t * entry) +{ + struct page *page; + int i; + + for (i = 0; i < entry->pages; i++) { + page = entry->pagelist[i]; + if (page) + ClearPageReserved(page); + } + + vfree(entry->virtual); + + drm_free(entry->busaddr, + entry->pages * sizeof(*entry->busaddr), DRM_MEM_PAGES); + drm_free(entry->pagelist, + entry->pages * sizeof(*entry->pagelist), DRM_MEM_PAGES); + drm_free(entry, sizeof(*entry), DRM_MEM_SGLISTS); +} + +#ifdef _LP64 +# define ScatterHandle(x) (unsigned int)((x >> 32) + (x & ((1L << 32) - 1))) +#else +# define ScatterHandle(x) (unsigned int)(x) +#endif + +int drm_sg_alloc(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->head->dev; + drm_scatter_gather_t __user *argp = (void __user *)arg; + drm_scatter_gather_t request; + drm_sg_mem_t *entry; + unsigned long pages, i, j; + + DRM_DEBUG("%s\n", __FUNCTION__); + + if (!drm_core_check_feature(dev, DRIVER_SG)) + return -EINVAL; + + if (dev->sg) + return -EINVAL; + + if (copy_from_user(&request, argp, sizeof(request))) + return -EFAULT; + + entry = drm_alloc(sizeof(*entry), DRM_MEM_SGLISTS); + if (!entry) + return -ENOMEM; + + memset(entry, 0, sizeof(*entry)); + + pages = (request.size + PAGE_SIZE - 1) / PAGE_SIZE; + DRM_DEBUG("sg size=%ld pages=%ld\n", request.size, pages); + + entry->pages = pages; + entry->pagelist = drm_alloc(pages * sizeof(*entry->pagelist), + DRM_MEM_PAGES); + if (!entry->pagelist) { + drm_free(entry, sizeof(*entry), DRM_MEM_SGLISTS); + return -ENOMEM; + } + + memset(entry->pagelist, 0, pages * sizeof(*entry->pagelist)); + + entry->busaddr = drm_alloc(pages * sizeof(*entry->busaddr), + DRM_MEM_PAGES); + if (!entry->busaddr) { + drm_free(entry->pagelist, + entry->pages * sizeof(*entry->pagelist), + DRM_MEM_PAGES); + drm_free(entry, sizeof(*entry), DRM_MEM_SGLISTS); + return -ENOMEM; + } + memset((void *)entry->busaddr, 0, pages * sizeof(*entry->busaddr)); + + entry->virtual = vmalloc_32(pages << PAGE_SHIFT); + if (!entry->virtual) { + drm_free(entry->busaddr, + entry->pages * sizeof(*entry->busaddr), DRM_MEM_PAGES); + drm_free(entry->pagelist, + entry->pages * sizeof(*entry->pagelist), + DRM_MEM_PAGES); + drm_free(entry, sizeof(*entry), DRM_MEM_SGLISTS); + return 
-ENOMEM; + } + + /* This also forces the mapping of COW pages, so our page list + * will be valid. Please don't remove it... + */ + memset(entry->virtual, 0, pages << PAGE_SHIFT); + + entry->handle = ScatterHandle((unsigned long)entry->virtual); + + DRM_DEBUG("sg alloc handle = %08lx\n", entry->handle); + DRM_DEBUG("sg alloc virtual = %p\n", entry->virtual); + + for (i = (unsigned long)entry->virtual, j = 0; j < pages; + i += PAGE_SIZE, j++) { + entry->pagelist[j] = vmalloc_to_page((void *)i); + if (!entry->pagelist[j]) + goto failed; + SetPageReserved(entry->pagelist[j]); + } + + request.handle = entry->handle; + + if (copy_to_user(argp, &request, sizeof(request))) { + drm_sg_cleanup(entry); + return -EFAULT; + } + + dev->sg = entry; + +#if DEBUG_SCATTER + /* Verify that each page points to its virtual address, and vice + * versa. + */ + { + int error = 0; + + for (i = 0; i < pages; i++) { + unsigned long *tmp; + + tmp = page_address(entry->pagelist[i]); + for (j = 0; + j < PAGE_SIZE / sizeof(unsigned long); + j++, tmp++) { + *tmp = 0xcafebabe; + } + tmp = (unsigned long *)((u8 *) entry->virtual + + (PAGE_SIZE * i)); + for (j = 0; + j < PAGE_SIZE / sizeof(unsigned long); + j++, tmp++) { + if (*tmp != 0xcafebabe && error == 0) { + error = 1; + DRM_ERROR("Scatter allocation error, " + "pagelist does not match " + "virtual mapping\n"); + } + } + tmp = page_address(entry->pagelist[i]); + for (j = 0; + j < PAGE_SIZE / sizeof(unsigned long); + j++, tmp++) { + *tmp = 0; + } + } + if (error == 0) + DRM_ERROR("Scatter allocation matches pagelist\n"); + } +#endif + + return 0; + + failed: + drm_sg_cleanup(entry); + return -ENOMEM; +} + +int drm_sg_free(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->head->dev; + drm_scatter_gather_t request; + drm_sg_mem_t *entry; + + if (!drm_core_check_feature(dev, DRIVER_SG)) + return -EINVAL; + + if (copy_from_user(&request, + (drm_scatter_gather_t __user *) arg, + sizeof(request))) + return -EFAULT; + + entry = dev->sg; + dev->sg = NULL; + + if (!entry || entry->handle != request.handle) + return -EINVAL; + + DRM_DEBUG("sg free virtual = %p\n", entry->virtual); + + drm_sg_cleanup(entry); + + return 0; +} diff --git a/nx-X11/extras/drm/linux-core/drm_stub.c b/nx-X11/extras/drm/linux-core/drm_stub.c new file mode 100644 index 000000000..52b5e9c61 --- /dev/null +++ b/nx-X11/extras/drm/linux-core/drm_stub.c @@ -0,0 +1,295 @@ +/** + * \file drm_stub.c + * Stub support + * + * \author Rickard E. (Rik) Faith <faith@valinux.com> + */ + +/* + * Created: Fri Jan 19 10:48:35 2001 by faith@acm.org + * + * Copyright 2001 VA Linux Systems, Inc., Sunnyvale, California. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. 
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+
+#include "drmP.h"
+#include "drm_core.h"
+
+unsigned int cards_limit = 16;	/* Enough for one machine */
+unsigned int drm_debug = 0;	/* 1 to enable debug output */
+EXPORT_SYMBOL(drm_debug);
+
+MODULE_AUTHOR(CORE_AUTHOR);
+MODULE_DESCRIPTION(CORE_DESC);
+MODULE_LICENSE("GPL and additional rights");
+MODULE_PARM_DESC(cards_limit, "Maximum number of graphics cards");
+MODULE_PARM_DESC(debug, "Enable debug output");
+
+module_param(cards_limit, int, S_IRUGO);
+module_param_named(debug, drm_debug, int, S_IRUGO|S_IWUGO);
+
+drm_head_t **drm_heads;
+struct drm_sysfs_class *drm_class;
+struct proc_dir_entry *drm_proc_root;
+
+static int fill_in_dev(drm_device_t * dev, struct pci_dev *pdev,
+		       const struct pci_device_id *ent,
+		       struct drm_driver *driver)
+{
+	int retcode;
+
+	spin_lock_init(&dev->count_lock);
+	init_timer(&dev->timer);
+	sema_init(&dev->struct_sem, 1);
+	sema_init(&dev->ctxlist_sem, 1);
+
+	dev->pdev = pdev;
+
+#ifdef __alpha__
+	dev->hose = pdev->sysdata;
+	dev->pci_domain = dev->hose->bus->number;
+#else
+	dev->pci_domain = 0;
+#endif
+	dev->pci_bus = pdev->bus->number;
+	dev->pci_slot = PCI_SLOT(pdev->devfn);
+	dev->pci_func = PCI_FUNC(pdev->devfn);
+	dev->irq = pdev->irq;
+
+	dev->maplist = drm_calloc(1, sizeof(*dev->maplist), DRM_MEM_MAPS);
+	if (dev->maplist == NULL)
+		return -ENOMEM;
+	INIT_LIST_HEAD(&dev->maplist->head);
+
+	/* the DRM has 6 counters */
+	dev->counters = 6;
+	dev->types[0] = _DRM_STAT_LOCK;
+	dev->types[1] = _DRM_STAT_OPENS;
+	dev->types[2] = _DRM_STAT_CLOSES;
+	dev->types[3] = _DRM_STAT_IOCTLS;
+	dev->types[4] = _DRM_STAT_LOCKS;
+	dev->types[5] = _DRM_STAT_UNLOCKS;
+
+	dev->driver = driver;
+
+	if (dev->driver->load)
+		if ((retcode = dev->driver->load(dev, ent->driver_data)))
+			goto error_out_unreg;
+
+	if (drm_core_has_AGP(dev)) {
+		if (drm_device_is_agp(dev))
+			dev->agp = drm_agp_init(dev);
+		if (drm_core_check_feature(dev, DRIVER_REQUIRE_AGP)
+		    && (dev->agp == NULL)) {
+			DRM_ERROR("Cannot initialize the agpgart module.\n");
+			retcode = -EINVAL;
+			goto error_out_unreg;
+		}
+
+		if (drm_core_has_MTRR(dev)) {
+			if (dev->agp)
+				dev->agp->agp_mtrr =
+				    mtrr_add(dev->agp->agp_info.aper_base,
+					     dev->agp->agp_info.aper_size *
+					     1024 * 1024, MTRR_TYPE_WRCOMB, 1);
+		}
+	}
+
+	retcode = drm_ctxbitmap_init(dev);
+	if (retcode) {
+		DRM_ERROR("Cannot allocate memory for context bitmap.\n");
+		goto error_out_unreg;
+	}
+
+	return 0;
+
+error_out_unreg:
+	drm_lastclose(dev);
+	return retcode;
+}
+
+/**
+ * Get a secondary minor number.
+ *
+ * \param dev device data structure
+ * \param head structure to hold the assigned minor
+ * \return zero on success, or a negative number on failure.
+ *
+ * Search for an empty entry and initialize it to the given parameters, and
+ * create the proc init entry via proc_init(). This routine assigns
+ * minor numbers to secondary heads of multi-headed cards.
+ */
+static int drm_get_head(drm_device_t * dev, drm_head_t * head)
+{
+	drm_head_t **heads = drm_heads;
+	int ret;
+	int minor;
+
+	DRM_DEBUG("\n");
+
+	for (minor = 0; minor < cards_limit; minor++, heads++) {
+		if (!*heads) {
+
+			*head = (drm_head_t) {
+				.dev = dev,
+				.device = MKDEV(DRM_MAJOR, minor),
+				.minor = minor,
+			};
+			if ((ret =
+			     drm_proc_init(dev, minor, drm_proc_root,
+					   &head->dev_root))) {
+				printk(KERN_ERR
+				       "DRM: Failed to initialize /proc/dri.\n");
+				goto err_g1;
+			}
+
+			head->dev_class = drm_sysfs_device_add(drm_class, head);
+			if (IS_ERR(head->dev_class)) {
+				printk(KERN_ERR
+				       "DRM: Error sysfs_device_add.\n");
+				ret = PTR_ERR(head->dev_class);
+				goto err_g2;
+			}
+			*heads = head;
+
+			DRM_DEBUG("new minor assigned %d\n", minor);
+			return 0;
+		}
+	}
+	DRM_ERROR("out of minors\n");
+	return -ENOMEM;
+err_g2:
+	drm_proc_cleanup(minor, drm_proc_root, head->dev_root);
+err_g1:
+	*head = (drm_head_t) {
+	.dev = NULL};
+	return ret;
+}
+
+/**
+ * Register.
+ *
+ * \param pdev - PCI device structure
+ * \param ent entry from the PCI ID table with device type flags
+ * \return zero on success or a negative number on failure.
+ *
+ * Attempt to get inter-module "drm" information. If we are first,
+ * then register the character device and inter-module information.
+ * Try to register; if we fail to register, back out the previous work.
+ */
+int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
+		struct drm_driver *driver)
+{
+	drm_device_t *dev;
+	int ret;
+
+	DRM_DEBUG("\n");
+
+	dev = drm_calloc(1, sizeof(*dev), DRM_MEM_STUB);
+	if (!dev)
+		return -ENOMEM;
+
+	if (!drm_fb_loaded) {
+		pci_set_drvdata(pdev, dev);
+		pci_request_regions(pdev, driver->pci_driver.name);
+		pci_enable_device(pdev);
+	}
+	if ((ret = fill_in_dev(dev, pdev, ent, driver))) {
+		goto err_g1;
+	}
+	if ((ret = drm_get_head(dev, &dev->primary)))
+		goto err_g1;
+
+	DRM_INFO("Initialized %s %d.%d.%d %s on minor %d: %s\n",
+		 driver->name, driver->major, driver->minor, driver->patchlevel,
+		 driver->date, dev->primary.minor, pci_pretty_name(dev->pdev));
+
+	return 0;
+
+err_g1:
+	if (!drm_fb_loaded) {
+		pci_set_drvdata(pdev, NULL);
+		pci_release_regions(pdev);
+		pci_disable_device(pdev);
+	}
+	drm_free(dev, sizeof(*dev), DRM_MEM_STUB);
+	printk(KERN_ERR "DRM: drm_get_dev failed.\n");
+	return ret;
+}
+EXPORT_SYMBOL(drm_get_dev);
+
+
+/**
+ * Put a device minor number.
+ *
+ * \param dev device data structure
+ * \return always zero
+ *
+ * Cleans up the proc resources. If it is the last minor, it releases the
+ * foreign "drm" data; otherwise it unregisters the "drm" data, frees the
+ * dev list, and unregisters the character device.
+ */
+int drm_put_dev(drm_device_t * dev)
+{
+	DRM_DEBUG("release primary %s\n", dev->driver->pci_driver.name);
+
+	if (dev->unique) {
+		drm_free(dev->unique, strlen(dev->unique) + 1, DRM_MEM_DRIVER);
+		dev->unique = NULL;
+		dev->unique_len = 0;
+	}
+	if (dev->devname) {
+		drm_free(dev->devname, strlen(dev->devname) + 1,
+			 DRM_MEM_DRIVER);
+		dev->devname = NULL;
+	}
+	drm_free(dev, sizeof(*dev), DRM_MEM_STUB);
+	return 0;
+}
+
+/**
+ * Put a secondary minor number.
+ *
+ * \param head - structure to be released
+ * \return always zero
+ *
+ * Cleans up the proc resources. Not legal for this to be the
+ * last minor released.
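+ * Removes the head's proc entries and sysfs class device, and clears its
+ * slot in drm_heads[].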
+ * + */ +int drm_put_head(drm_head_t * head) +{ + int minor = head->minor; + + DRM_DEBUG("release secondary minor %d\n", minor); + + drm_proc_cleanup(minor, drm_proc_root, head->dev_root); + drm_sysfs_device_remove(head->dev_class); + + *head = (drm_head_t){.dev = NULL}; + + drm_heads[minor] = NULL; + return 0; +} diff --git a/nx-X11/extras/drm/linux-core/drm_sysfs.c b/nx-X11/extras/drm/linux-core/drm_sysfs.c new file mode 100644 index 000000000..a1faf7830 --- /dev/null +++ b/nx-X11/extras/drm/linux-core/drm_sysfs.c @@ -0,0 +1,203 @@ +/* + * drm_sysfs.c - Modifications to drm_sysfs_class.c to support + * extra sysfs attribute from DRM. Normal drm_sysfs_class + * does not allow adding attributes. + * + * Copyright (c) 2004 Jon Smirl <jonsmirl@gmail.com> + * Copyright (c) 2003-2004 Greg Kroah-Hartman <greg@kroah.com> + * Copyright (c) 2003-2004 IBM Corp. + * + * This file is released under the GPLv2 + * + */ + +#include <linux/config.h> +#include <linux/device.h> +#include <linux/kdev_t.h> +#include <linux/err.h> + +#include "drmP.h" +#include "drm_core.h" + +struct drm_sysfs_class { + struct class_device_attribute attr; + struct class class; +}; +#define to_drm_sysfs_class(d) container_of(d, struct drm_sysfs_class, class) + +struct simple_dev { + dev_t dev; + struct class_device class_dev; +}; +#define to_simple_dev(d) container_of(d, struct simple_dev, class_dev) + +static void release_simple_dev(struct class_device *class_dev) +{ + struct simple_dev *s_dev = to_simple_dev(class_dev); + kfree(s_dev); +} + +static ssize_t show_dev(struct class_device *class_dev, char *buf) +{ + struct simple_dev *s_dev = to_simple_dev(class_dev); + return print_dev_t(buf, s_dev->dev); +} + +static void drm_sysfs_class_release(struct class *class) +{ + struct drm_sysfs_class *cs = to_drm_sysfs_class(class); + kfree(cs); +} + +/* Display the version of drm_core. This doesn't work right in current design */ +static ssize_t version_show(struct class *dev, char *buf) +{ + return sprintf(buf, "%s %d.%d.%d %s\n", CORE_NAME, CORE_MAJOR, + CORE_MINOR, CORE_PATCHLEVEL, CORE_DATE); +} + +static CLASS_ATTR(version, S_IRUGO, version_show, NULL); + +/** + * drm_sysfs_create - create a struct drm_sysfs_class structure + * @owner: pointer to the module that is to "own" this struct drm_sysfs_class + * @name: pointer to a string for the name of this class. + * + * This is used to create a struct drm_sysfs_class pointer that can then be used + * in calls to drm_sysfs_device_add(). + * + * Note, the pointer created here is to be destroyed when finished by making a + * call to drm_sysfs_destroy(). 
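+ *
+ * A minimal usage sketch (this mirrors what the DRM core init path is
+ * expected to do; the error handling shown is an assumption, not copied
+ * from this tree):
+ *
+ *	drm_class = drm_sysfs_create(THIS_MODULE, "drm");
+ *	if (IS_ERR(drm_class))
+ *		return PTR_ERR(drm_class);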
+ */
+struct drm_sysfs_class *drm_sysfs_create(struct module *owner, char *name)
+{
+	struct drm_sysfs_class *cs;
+	int retval;
+
+	cs = kmalloc(sizeof(*cs), GFP_KERNEL);
+	if (!cs) {
+		retval = -ENOMEM;
+		goto error;
+	}
+	memset(cs, 0x00, sizeof(*cs));
+
+	cs->class.name = name;
+	cs->class.class_release = drm_sysfs_class_release;
+	cs->class.release = release_simple_dev;
+
+	cs->attr.attr.name = "dev";
+	cs->attr.attr.mode = S_IRUGO;
+	cs->attr.attr.owner = owner;
+	cs->attr.show = show_dev;
+	cs->attr.store = NULL;
+
+	retval = class_register(&cs->class);
+	if (retval)
+		goto error;
+	class_create_file(&cs->class, &class_attr_version);
+
+	return cs;
+
+ error:
+	kfree(cs);
+	return ERR_PTR(retval);
+}
+
+/**
+ * drm_sysfs_destroy - destroys a struct drm_sysfs_class structure
+ * @cs: pointer to the struct drm_sysfs_class that is to be destroyed
+ *
+ * Note, the pointer to be destroyed must have been created with a call to
+ * drm_sysfs_create().
+ */
+void drm_sysfs_destroy(struct drm_sysfs_class *cs)
+{
+	if ((cs == NULL) || (IS_ERR(cs)))
+		return;
+
+	class_unregister(&cs->class);
+}
+
+static ssize_t show_dri(struct class_device *class_device, char *buf)
+{
+	drm_device_t * dev = ((drm_head_t *)class_get_devdata(class_device))->dev;
+	if (dev->driver->dri_library_name)
+		return dev->driver->dri_library_name(dev, buf);
+	return snprintf(buf, PAGE_SIZE, "%s\n", dev->driver->pci_driver.name);
+}
+
+static struct class_device_attribute class_device_attrs[] = {
+	__ATTR(dri_library_name, S_IRUGO, show_dri, NULL),
+};
+
+/**
+ * drm_sysfs_device_add - adds a class device to sysfs for a character driver
+ * @cs: pointer to the struct drm_sysfs_class that this device should be registered to.
+ * @head: the DRM head (minor) that is associated with this class device.
+ *
+ * A struct class_device will be created in sysfs, registered to the specified
+ * class. A "dev" file will be created, showing the dev_t for the device. The
+ * pointer to the struct class_device will be returned from the call. Any further
+ * sysfs files that might be required can be created using this pointer.
+ * Note: the struct drm_sysfs_class passed to this function must have previously been
+ * created with a call to drm_sysfs_create().
+ */
+struct class_device *drm_sysfs_device_add(
+	struct drm_sysfs_class *cs, drm_head_t * head)
+{
+	struct simple_dev *s_dev = NULL;
+	int i, retval;
+
+	if ((cs == NULL) || (IS_ERR(cs))) {
+		retval = -ENODEV;
+		goto error;
+	}
+
+	s_dev = kmalloc(sizeof(*s_dev), GFP_KERNEL);
+	if (!s_dev) {
+		retval = -ENOMEM;
+		goto error;
+	}
+	memset(s_dev, 0x00, sizeof(*s_dev));
+
+	s_dev->dev = MKDEV(DRM_MAJOR, head->minor);
+	s_dev->class_dev.dev = DRM_PCI_DEV(head->dev->pdev);
+	s_dev->class_dev.class = &cs->class;
+
+	snprintf(s_dev->class_dev.class_id, BUS_ID_SIZE, "card%d", head->minor);
+	retval = class_device_register(&s_dev->class_dev);
+	if (retval)
+		goto error;
+
+	class_device_create_file(&s_dev->class_dev, &cs->attr);
+	class_set_devdata(&s_dev->class_dev, head);
+
+	for (i = 0; i < ARRAY_SIZE(class_device_attrs); i++)
+		class_device_create_file(&s_dev->class_dev, &class_device_attrs[i]);
+
+	return &s_dev->class_dev;
+
+error:
+	kfree(s_dev);
+	return ERR_PTR(retval);
+}
+
+/**
+ * drm_sysfs_device_remove - removes a class device that was created with drm_sysfs_device_add()
+ * @class_dev: pointer to the class device that was previously registered.
+ * + * This call unregisters and cleans up a class device that was created with a + * call to drm_sysfs_device_add() + */ +void drm_sysfs_device_remove(struct class_device *class_dev) +{ + struct simple_dev *s_dev = to_simple_dev(class_dev); + int i; + + for (i = 0; i < ARRAY_SIZE(class_device_attrs); i++) + class_device_remove_file(&s_dev->class_dev, &class_device_attrs[i]); + + class_device_unregister(&s_dev->class_dev); +} diff --git a/nx-X11/extras/drm/linux-core/drm_vm.c b/nx-X11/extras/drm/linux-core/drm_vm.c new file mode 100644 index 000000000..b2d0bfb12 --- /dev/null +++ b/nx-X11/extras/drm/linux-core/drm_vm.c @@ -0,0 +1,713 @@ +/** + * \file drm_vm.c + * Memory mapping for DRM + * + * \author Rickard E. (Rik) Faith <faith@valinux.com> + * \author Gareth Hughes <gareth@valinux.com> + */ + +/* + * Created: Mon Jan 4 08:58:31 1999 by faith@valinux.com + * + * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. + * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#include "drmP.h" +#if defined(__ia64__) +#include <linux/efi.h> +#endif + +static void drm_vm_close(struct vm_area_struct *vma); +static void drm_vm_open(struct vm_area_struct *vma); + +/** + * \c nopage method for AGP virtual memory. + * + * \param vma virtual memory area. + * \param address access address. + * \return pointer to the page structure. + * + * Find the right map and if it's AGP memory find the real physical page to + * map, get the page, increment the use count and return it. 
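+ * Note that this path only does real work when the AGP aperture cannot be
+ * accessed directly (dev->agp->cant_use_aperture); otherwise it falls
+ * through to the error label and returns NOPAGE_SIGBUS.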
+ */
+#if __OS_HAS_AGP
+static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
+						unsigned long address)
+{
+	drm_file_t *priv = vma->vm_file->private_data;
+	drm_device_t *dev = priv->head->dev;
+	drm_map_t *map = NULL;
+	drm_map_list_t *r_list;
+	struct list_head *list;
+
+	/*
+	 * Find the right map
+	 */
+	if (!drm_core_has_AGP(dev))
+		goto vm_nopage_error;
+
+	if (!dev->agp || !dev->agp->cant_use_aperture)
+		goto vm_nopage_error;
+
+	list_for_each(list, &dev->maplist->head) {
+		r_list = list_entry(list, drm_map_list_t, head);
+		map = r_list->map;
+		if (!map)
+			continue;
+		if (r_list->user_token == VM_OFFSET(vma))
+			break;
+	}
+
+	if (map && map->type == _DRM_AGP) {
+		unsigned long offset = address - vma->vm_start;
+		unsigned long baddr = map->offset + offset;
+		struct drm_agp_mem *agpmem;
+		struct page *page;
+
+#ifdef __alpha__
+		/*
+		 * Adjust to a bus-relative address
+		 */
+		baddr -= dev->hose->mem_space->start;
+#endif
+
+		/*
+		 * It's AGP memory - find the real physical page to map
+		 */
+		for (agpmem = dev->agp->memory; agpmem; agpmem = agpmem->next) {
+			if (agpmem->bound <= baddr &&
+			    agpmem->bound + agpmem->pages * PAGE_SIZE > baddr)
+				break;
+		}
+
+		if (!agpmem)
+			goto vm_nopage_error;
+
+		/*
+		 * Get the page, inc the use count, and return it
+		 */
+		offset = (baddr - agpmem->bound) >> PAGE_SHIFT;
+		page = virt_to_page(__va(agpmem->memory->memory[offset]));
+		get_page(page);
+
+#if 0
+		/* page_count() not defined everywhere */
+		DRM_DEBUG
+		    ("baddr = 0x%lx page = 0x%p, offset = 0x%lx, count=%d\n",
+		     baddr, __va(agpmem->memory->memory[offset]), offset,
+		     page_count(page));
+#endif
+
+		return page;
+	}
+      vm_nopage_error:
+	return NOPAGE_SIGBUS;	/* Disallow mremap */
+}
+#else				/* __OS_HAS_AGP */
+static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
+						unsigned long address)
+{
+	return NOPAGE_SIGBUS;
+}
+#endif				/* __OS_HAS_AGP */
+
+/**
+ * \c nopage method for shared virtual memory.
+ *
+ * \param vma virtual memory area.
+ * \param address access address.
+ * \return pointer to the page structure.
+ *
+ * Get the mapping, find the real physical page to map, get the page, and
+ * return it.
+ */
+static __inline__ struct page *drm_do_vm_shm_nopage(struct vm_area_struct *vma,
+						    unsigned long address)
+{
+	drm_map_t *map = (drm_map_t *) vma->vm_private_data;
+	unsigned long offset;
+	unsigned long i;
+	struct page *page;
+
+	if (address > vma->vm_end)
+		return NOPAGE_SIGBUS;	/* Disallow mremap */
+	if (!map)
+		return NOPAGE_OOM;	/* Nothing allocated */
+
+	offset = address - vma->vm_start;
+	i = (unsigned long)map->handle + offset;
+	page = (map->type == _DRM_CONSISTENT) ?
+	    virt_to_page((void *)i) : vmalloc_to_page((void *)i);
+	if (!page)
+		return NOPAGE_OOM;
+	get_page(page);
+
+	DRM_DEBUG("shm_nopage 0x%lx\n", address);
+	return page;
+}
+
+/**
+ * \c close method for shared virtual memory.
+ *
+ * \param vma virtual memory area.
+ *
+ * Deletes map information if we are the last
+ * person to close a mapping and it's not in the global maplist.
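+ * The drm_device::vmalist walk and any map teardown happen under
+ * drm_device::struct_sem.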
+ */ +static void drm_vm_shm_close(struct vm_area_struct *vma) +{ + drm_file_t *priv = vma->vm_file->private_data; + drm_device_t *dev = priv->head->dev; + drm_vma_entry_t *pt, *prev, *next; + drm_map_t *map; + drm_map_list_t *r_list; + struct list_head *list; + int found_maps = 0; + + DRM_DEBUG("0x%08lx,0x%08lx\n", + vma->vm_start, vma->vm_end - vma->vm_start); + atomic_dec(&dev->vma_count); + + map = vma->vm_private_data; + + down(&dev->struct_sem); + for (pt = dev->vmalist, prev = NULL; pt; pt = next) { + next = pt->next; + if (pt->vma->vm_private_data == map) + found_maps++; + if (pt->vma == vma) { + if (prev) { + prev->next = pt->next; + } else { + dev->vmalist = pt->next; + } + drm_free(pt, sizeof(*pt), DRM_MEM_VMAS); + } else { + prev = pt; + } + } + /* We were the only map that was found */ + if (found_maps == 1 && map->flags & _DRM_REMOVABLE) { + /* Check to see if we are in the maplist, if we are not, then + * we delete this mappings information. + */ + found_maps = 0; + list = &dev->maplist->head; + list_for_each(list, &dev->maplist->head) { + r_list = list_entry(list, drm_map_list_t, head); + if (r_list->map == map) + found_maps++; + } + + if (!found_maps) { + drm_dma_handle_t dmah; + + switch (map->type) { + case _DRM_REGISTERS: + case _DRM_FRAME_BUFFER: + if (drm_core_has_MTRR(dev) && map->mtrr >= 0) { + int retcode; + retcode = mtrr_del(map->mtrr, + map->offset, + map->size); + DRM_DEBUG("mtrr_del = %d\n", retcode); + } + drm_ioremapfree(map->handle, map->size, dev); + break; + case _DRM_SHM: + vfree(map->handle); + break; + case _DRM_AGP: + case _DRM_SCATTER_GATHER: + break; + case _DRM_CONSISTENT: + dmah.vaddr = map->handle; + dmah.busaddr = map->offset; + dmah.size = map->size; + __drm_pci_free(dev, &dmah); + break; + } + drm_free(map, sizeof(*map), DRM_MEM_MAPS); + } + } + up(&dev->struct_sem); +} + +/** + * \c nopage method for DMA virtual memory. + * + * \param vma virtual memory area. + * \param address access address. + * \return pointer to the page structure. + * + * Determine the page number from the page offset and get it from drm_device_dma::pagelist. + */ +static __inline__ struct page *drm_do_vm_dma_nopage(struct vm_area_struct *vma, + unsigned long address) +{ + drm_file_t *priv = vma->vm_file->private_data; + drm_device_t *dev = priv->head->dev; + drm_device_dma_t *dma = dev->dma; + unsigned long offset; + unsigned long page_nr; + struct page *page; + + if (!dma) + return NOPAGE_SIGBUS; /* Error */ + if (address > vma->vm_end) + return NOPAGE_SIGBUS; /* Disallow mremap */ + if (!dma->pagelist) + return NOPAGE_OOM; /* Nothing allocated */ + + offset = address - vma->vm_start; /* vm_[pg]off[set] should be 0 */ + page_nr = offset >> PAGE_SHIFT; + page = virt_to_page((dma->pagelist[page_nr] + (offset & (~PAGE_MASK)))); + + get_page(page); + + DRM_DEBUG("dma_nopage 0x%lx (page %lu)\n", address, page_nr); + return page; +} + +/** + * \c nopage method for scatter-gather virtual memory. + * + * \param vma virtual memory area. + * \param address access address. + * \return pointer to the page structure. + * + * Determine the map offset from the page offset and get it from drm_sg_mem::pagelist. 
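+ * The page index is the fault offset within the vma plus the map's
+ * offset into dev->sg->virtual, both taken in whole pages.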
+ */ +static __inline__ struct page *drm_do_vm_sg_nopage(struct vm_area_struct *vma, + unsigned long address) +{ + drm_map_t *map = (drm_map_t *) vma->vm_private_data; + drm_file_t *priv = vma->vm_file->private_data; + drm_device_t *dev = priv->head->dev; + drm_sg_mem_t *entry = dev->sg; + unsigned long offset; + unsigned long map_offset; + unsigned long page_offset; + struct page *page; + + if (!entry) + return NOPAGE_SIGBUS; /* Error */ + if (address > vma->vm_end) + return NOPAGE_SIGBUS; /* Disallow mremap */ + if (!entry->pagelist) + return NOPAGE_OOM; /* Nothing allocated */ + + offset = address - vma->vm_start; + map_offset = map->offset - (unsigned long)dev->sg->virtual; + page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT); + page = entry->pagelist[page_offset]; + get_page(page); + + return page; +} + +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0) + +static struct page *drm_vm_nopage(struct vm_area_struct *vma, + unsigned long address, int *type) +{ + if (type) + *type = VM_FAULT_MINOR; + return drm_do_vm_nopage(vma, address); +} + +static struct page *drm_vm_shm_nopage(struct vm_area_struct *vma, + unsigned long address, int *type) +{ + if (type) + *type = VM_FAULT_MINOR; + return drm_do_vm_shm_nopage(vma, address); +} + +static struct page *drm_vm_dma_nopage(struct vm_area_struct *vma, + unsigned long address, int *type) +{ + if (type) + *type = VM_FAULT_MINOR; + return drm_do_vm_dma_nopage(vma, address); +} + +static struct page *drm_vm_sg_nopage(struct vm_area_struct *vma, + unsigned long address, int *type) +{ + if (type) + *type = VM_FAULT_MINOR; + return drm_do_vm_sg_nopage(vma, address); +} + +#else /* LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,0) */ + +static struct page *drm_vm_nopage(struct vm_area_struct *vma, + unsigned long address, int unused) +{ + return drm_do_vm_nopage(vma, address); +} + +static struct page *drm_vm_shm_nopage(struct vm_area_struct *vma, + unsigned long address, int unused) +{ + return drm_do_vm_shm_nopage(vma, address); +} + +static struct page *drm_vm_dma_nopage(struct vm_area_struct *vma, + unsigned long address, int unused) +{ + return drm_do_vm_dma_nopage(vma, address); +} + +static struct page *drm_vm_sg_nopage(struct vm_area_struct *vma, + unsigned long address, int unused) +{ + return drm_do_vm_sg_nopage(vma, address); +} + +#endif + +/** AGP virtual memory operations */ +static struct vm_operations_struct drm_vm_ops = { + .nopage = drm_vm_nopage, + .open = drm_vm_open, + .close = drm_vm_close, +}; + +/** Shared virtual memory operations */ +static struct vm_operations_struct drm_vm_shm_ops = { + .nopage = drm_vm_shm_nopage, + .open = drm_vm_open, + .close = drm_vm_shm_close, +}; + +/** DMA virtual memory operations */ +static struct vm_operations_struct drm_vm_dma_ops = { + .nopage = drm_vm_dma_nopage, + .open = drm_vm_open, + .close = drm_vm_close, +}; + +/** Scatter-gather virtual memory operations */ +static struct vm_operations_struct drm_vm_sg_ops = { + .nopage = drm_vm_sg_nopage, + .open = drm_vm_open, + .close = drm_vm_close, +}; + +/** + * \c open method for shared virtual memory. + * + * \param vma virtual memory area. + * + * Create a new drm_vma_entry structure as the \p vma private data entry and + * add it to drm_device::vmalist. 
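+ * If the tracking entry cannot be allocated, the mapping itself still
+ * succeeds; the vma simply goes untracked (and thus unlisted in
+ * "/proc/dri/.../vma").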
+ */ +static void drm_vm_open(struct vm_area_struct *vma) +{ + drm_file_t *priv = vma->vm_file->private_data; + drm_device_t *dev = priv->head->dev; + drm_vma_entry_t *vma_entry; + + DRM_DEBUG("0x%08lx,0x%08lx\n", + vma->vm_start, vma->vm_end - vma->vm_start); + atomic_inc(&dev->vma_count); + + vma_entry = drm_alloc(sizeof(*vma_entry), DRM_MEM_VMAS); + if (vma_entry) { + down(&dev->struct_sem); + vma_entry->vma = vma; + vma_entry->next = dev->vmalist; + vma_entry->pid = current->pid; + dev->vmalist = vma_entry; + up(&dev->struct_sem); + } +} + +/** + * \c close method for all virtual memory types. + * + * \param vma virtual memory area. + * + * Search the \p vma private data entry in drm_device::vmalist, unlink it, and + * free it. + */ +static void drm_vm_close(struct vm_area_struct *vma) +{ + drm_file_t *priv = vma->vm_file->private_data; + drm_device_t *dev = priv->head->dev; + drm_vma_entry_t *pt, *prev; + + DRM_DEBUG("0x%08lx,0x%08lx\n", + vma->vm_start, vma->vm_end - vma->vm_start); + atomic_dec(&dev->vma_count); + + down(&dev->struct_sem); + for (pt = dev->vmalist, prev = NULL; pt; prev = pt, pt = pt->next) { + if (pt->vma == vma) { + if (prev) { + prev->next = pt->next; + } else { + dev->vmalist = pt->next; + } + drm_free(pt, sizeof(*pt), DRM_MEM_VMAS); + break; + } + } + up(&dev->struct_sem); +} + +/** + * mmap DMA memory. + * + * \param filp file pointer. + * \param vma virtual memory area. + * \return zero on success or a negative number on failure. + * + * Sets the virtual memory area operations structure to vm_dma_ops, the file + * pointer, and calls vm_open(). + */ +static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev; + drm_device_dma_t *dma; + unsigned long length = vma->vm_end - vma->vm_start; + + lock_kernel(); + dev = priv->head->dev; + dma = dev->dma; + DRM_DEBUG("start = 0x%lx, end = 0x%lx, offset = 0x%lx\n", + vma->vm_start, vma->vm_end, VM_OFFSET(vma)); + + /* Length must match exact page count */ + if (!dma || (length >> PAGE_SHIFT) != dma->page_count) { + unlock_kernel(); + return -EINVAL; + } + unlock_kernel(); + + vma->vm_ops = &drm_vm_dma_ops; + +#if LINUX_VERSION_CODE <= 0x02040e /* KERNEL_VERSION(2,4,14) */ + vma->vm_flags |= VM_LOCKED | VM_SHM; /* Don't swap */ +#else + vma->vm_flags |= VM_RESERVED; /* Don't swap */ +#endif + + vma->vm_file = filp; /* Needed for drm_vm_open() */ + drm_vm_open(vma); + return 0; +} + +unsigned long drm_core_get_map_ofs(drm_map_t * map) +{ + return map->offset; +} +EXPORT_SYMBOL(drm_core_get_map_ofs); + +unsigned long drm_core_get_reg_ofs(struct drm_device *dev) +{ +#ifdef __alpha__ + return dev->hose->dense_mem_base - dev->hose->mem_space->start; +#else + return 0; +#endif +} +EXPORT_SYMBOL(drm_core_get_reg_ofs); + +/** + * mmap DMA memory. + * + * \param filp file pointer. + * \param vma virtual memory area. + * \return zero on success or a negative number on failure. + * + * If the virtual memory area has no offset associated with it then it's a DMA + * area, so calls mmap_dma(). Otherwise searches the map in drm_device::maplist, + * checks that the restricted flag is not set, sets the virtual memory operations + * according to the mapping type and remaps the pages. Finally sets the file + * pointer and calls vm_open(). 
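+ * Only authenticated clients may map; everyone else gets -EACCES.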
+ */ +int drm_mmap(struct file *filp, struct vm_area_struct *vma) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->head->dev; + drm_map_t *map = NULL; + drm_map_list_t *r_list; + unsigned long offset = 0; + struct list_head *list; + + DRM_DEBUG("start = 0x%lx, end = 0x%lx, offset = 0x%lx\n", + vma->vm_start, vma->vm_end, VM_OFFSET(vma)); + + if (!priv->authenticated) + return -EACCES; + + /* We check for "dma". On Apple's UniNorth, it's valid to have + * the AGP mapped at physical address 0 + * --BenH. + */ + if (!VM_OFFSET(vma) +#if __OS_HAS_AGP + && (!dev->agp + || dev->agp->agp_info.device->vendor != PCI_VENDOR_ID_APPLE) +#endif + ) + return drm_mmap_dma(filp, vma); + + /* A sequential search of a linked list is + fine here because: 1) there will only be + about 5-10 entries in the list and, 2) a + DRI client only has to do this mapping + once, so it doesn't have to be optimized + for performance, even if the list was a + bit longer. */ + list_for_each(list, &dev->maplist->head) { + + r_list = list_entry(list, drm_map_list_t, head); + map = r_list->map; + if (!map) + continue; + if (r_list->user_token == VM_OFFSET(vma)) + break; + } + + if (!map || ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN))) + return -EPERM; + + /* Check for valid size. */ + if (map->size < vma->vm_end - vma->vm_start) + return -EINVAL; + + if (!capable(CAP_SYS_ADMIN) && (map->flags & _DRM_READ_ONLY)) { + vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE); +#if defined(__i386__) || defined(__x86_64__) + pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW; +#else + /* Ye gads this is ugly. With more thought + we could move this up higher and use + `protection_map' instead. */ + vma->vm_page_prot = + __pgprot(pte_val + (pte_wrprotect + (__pte(pgprot_val(vma->vm_page_prot))))); +#endif + } + + switch (map->type) { + case _DRM_AGP: + if (drm_core_has_AGP(dev) && dev->agp->cant_use_aperture) { + /* + * On some platforms we can't talk to bus dma address from the CPU, so for + * memory of type DRM_AGP, we'll deal with sorting out the real physical + * pages and mappings in nopage() + */ +#if defined(__powerpc__) + pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE; +#endif + vma->vm_ops = &drm_vm_ops; + break; + } + /* fall through to _DRM_FRAME_BUFFER... 
*/ + case _DRM_FRAME_BUFFER: + case _DRM_REGISTERS: +#if defined(__i386__) || defined(__x86_64__) + if (boot_cpu_data.x86 > 3 && map->type != _DRM_AGP) { + pgprot_val(vma->vm_page_prot) |= _PAGE_PCD; + pgprot_val(vma->vm_page_prot) &= ~_PAGE_PWT; + } +#elif defined(__powerpc__) + pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE; + if (map->type == _DRM_REGISTERS) + pgprot_val(vma->vm_page_prot) |= _PAGE_GUARDED; +#endif + vma->vm_flags |= VM_IO; /* not in core dump */ +#if defined(__ia64__) + if (efi_range_is_wc(vma->vm_start, vma->vm_end - + vma->vm_start)) + vma->vm_page_prot = + pgprot_writecombine(vma->vm_page_prot); + else + vma->vm_page_prot = + pgprot_noncached(vma->vm_page_prot); +#endif + offset = dev->driver->get_reg_ofs(dev); +#ifdef __sparc__ + if (io_remap_pfn_range(vma, vma->vm_start, + (map->offset + offset) >>PAGE_SHIFT, + vma->vm_end - vma->vm_start, + vma->vm_page_prot)) +#else + if (remap_pfn_range(vma, vma->vm_start, + (map->offset + offset) >> PAGE_SHIFT, + vma->vm_end - vma->vm_start, + vma->vm_page_prot)) +#endif + return -EAGAIN; + DRM_DEBUG(" Type = %d; start = 0x%lx, end = 0x%lx," + " offset = 0x%lx\n", + map->type, + vma->vm_start, vma->vm_end, map->offset + offset); + vma->vm_ops = &drm_vm_ops; + break; + case _DRM_SHM: + case _DRM_CONSISTENT: + /* Consistent memory is much like shared memory. The + * only difference is that drm_vm_shm_nopage must use + * virt_to_page instead of vmalloc_to_page. */ + vma->vm_ops = &drm_vm_shm_ops; + vma->vm_private_data = (void *)map; + /* Don't let this area swap. Change when + DRM_KERNEL advisory is supported. */ +#if LINUX_VERSION_CODE <= 0x02040e /* KERNEL_VERSION(2,4,14) */ + vma->vm_flags |= VM_LOCKED; +#else + vma->vm_flags |= VM_RESERVED; +#endif + break; + case _DRM_SCATTER_GATHER: + vma->vm_ops = &drm_vm_sg_ops; + vma->vm_private_data = (void *)map; +#if LINUX_VERSION_CODE <= 0x02040e /* KERNEL_VERSION(2,4,14) */ + vma->vm_flags |= VM_LOCKED; +#else + vma->vm_flags |= VM_RESERVED; +#endif + break; + default: + return -EINVAL; /* This should never happen. */ + } +#if LINUX_VERSION_CODE <= 0x02040e /* KERNEL_VERSION(2,4,14) */ + vma->vm_flags |= VM_LOCKED | VM_SHM; /* Don't swap */ +#else + vma->vm_flags |= VM_RESERVED; /* Don't swap */ +#endif + + vma->vm_file = filp; /* Needed for drm_vm_open() */ + drm_vm_open(vma); + return 0; +} +EXPORT_SYMBOL(drm_mmap); diff --git a/nx-X11/extras/drm/linux-core/ffb_context.c b/nx-X11/extras/drm/linux-core/ffb_context.c new file mode 100644 index 000000000..2344bde98 --- /dev/null +++ b/nx-X11/extras/drm/linux-core/ffb_context.c @@ -0,0 +1,582 @@ +/* $Id: ffb_context.c,v 1.1.1.2 2005/10/18 02:24:09 kem Exp $ + * ffb_context.c: Creator/Creator3D DRI/DRM context switching. + * + * Copyright (C) 2000 David S. Miller (davem@redhat.com) + * + * Almost entirely stolen from tdfx_context.c, see there + * for authors. + */ + +#include <linux/sched.h> +#include <asm/upa.h> + +#include "drmP.h" +#include "ffb_drv.h" + +static int ffb_alloc_queue(drm_device_t * dev, int is_2d_only) { + ffb_dev_priv_t *fpriv = (ffb_dev_priv_t *) dev->dev_private; + int i; + + for (i = 0; i < FFB_MAX_CTXS; i++) { + if (fpriv->hw_state[i] == NULL) + break; + } + if (i == FFB_MAX_CTXS) + return -1; + + fpriv->hw_state[i] = kmalloc(sizeof(struct ffb_hw_context), GFP_KERNEL); + if (fpriv->hw_state[i] == NULL) + return -1; + + fpriv->hw_state[i]->is_2d_only = is_2d_only; + + /* Plus one because 0 is the special DRM_KERNEL_CONTEXT. 
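	 * Handles handed back to userland are therefore always >= 1.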
*/ + return i + 1; +} + +static void ffb_save_context(ffb_dev_priv_t * fpriv, int idx) +{ + ffb_fbcPtr ffb = fpriv->regs; + struct ffb_hw_context *ctx; + int i; + + ctx = fpriv->hw_state[idx - 1]; + if (idx == 0 || ctx == NULL) + return; + + if (ctx->is_2d_only) { + /* 2D applications only care about certain pieces + * of state. + */ + ctx->drawop = upa_readl(&ffb->drawop); + ctx->ppc = upa_readl(&ffb->ppc); + ctx->wid = upa_readl(&ffb->wid); + ctx->fg = upa_readl(&ffb->fg); + ctx->bg = upa_readl(&ffb->bg); + ctx->xclip = upa_readl(&ffb->xclip); + ctx->fbc = upa_readl(&ffb->fbc); + ctx->rop = upa_readl(&ffb->rop); + ctx->cmp = upa_readl(&ffb->cmp); + ctx->matchab = upa_readl(&ffb->matchab); + ctx->magnab = upa_readl(&ffb->magnab); + ctx->pmask = upa_readl(&ffb->pmask); + ctx->xpmask = upa_readl(&ffb->xpmask); + ctx->lpat = upa_readl(&ffb->lpat); + ctx->fontxy = upa_readl(&ffb->fontxy); + ctx->fontw = upa_readl(&ffb->fontw); + ctx->fontinc = upa_readl(&ffb->fontinc); + + /* stencil/stencilctl only exists on FFB2+ and later + * due to the introduction of 3DRAM-III. + */ + if (fpriv->ffb_type == ffb2_vertical_plus || + fpriv->ffb_type == ffb2_horizontal_plus) { + ctx->stencil = upa_readl(&ffb->stencil); + ctx->stencilctl = upa_readl(&ffb->stencilctl); + } + + for (i = 0; i < 32; i++) + ctx->area_pattern[i] = upa_readl(&ffb->pattern[i]); + ctx->ucsr = upa_readl(&ffb->ucsr); + return; + } + + /* Fetch drawop. */ + ctx->drawop = upa_readl(&ffb->drawop); + + /* If we were saving the vertex registers, this is where + * we would do it. We would save 32 32-bit words starting + * at ffb->suvtx. + */ + + /* Capture rendering attributes. */ + + ctx->ppc = upa_readl(&ffb->ppc); /* Pixel Processor Control */ + ctx->wid = upa_readl(&ffb->wid); /* Current WID */ + ctx->fg = upa_readl(&ffb->fg); /* Constant FG color */ + ctx->bg = upa_readl(&ffb->bg); /* Constant BG color */ + ctx->consty = upa_readl(&ffb->consty); /* Constant Y */ + ctx->constz = upa_readl(&ffb->constz); /* Constant Z */ + ctx->xclip = upa_readl(&ffb->xclip); /* X plane clip */ + ctx->dcss = upa_readl(&ffb->dcss); /* Depth Cue Scale Slope */ + ctx->vclipmin = upa_readl(&ffb->vclipmin); /* Primary XY clip, minimum */ + ctx->vclipmax = upa_readl(&ffb->vclipmax); /* Primary XY clip, maximum */ + ctx->vclipzmin = upa_readl(&ffb->vclipzmin); /* Primary Z clip, minimum */ + ctx->vclipzmax = upa_readl(&ffb->vclipzmax); /* Primary Z clip, maximum */ + ctx->dcsf = upa_readl(&ffb->dcsf); /* Depth Cue Scale Front Bound */ + ctx->dcsb = upa_readl(&ffb->dcsb); /* Depth Cue Scale Back Bound */ + ctx->dczf = upa_readl(&ffb->dczf); /* Depth Cue Scale Z Front */ + ctx->dczb = upa_readl(&ffb->dczb); /* Depth Cue Scale Z Back */ + ctx->blendc = upa_readl(&ffb->blendc); /* Alpha Blend Control */ + ctx->blendc1 = upa_readl(&ffb->blendc1); /* Alpha Blend Color 1 */ + ctx->blendc2 = upa_readl(&ffb->blendc2); /* Alpha Blend Color 2 */ + ctx->fbc = upa_readl(&ffb->fbc); /* Frame Buffer Control */ + ctx->rop = upa_readl(&ffb->rop); /* Raster Operation */ + ctx->cmp = upa_readl(&ffb->cmp); /* Compare Controls */ + ctx->matchab = upa_readl(&ffb->matchab); /* Buffer A/B Match Ops */ + ctx->matchc = upa_readl(&ffb->matchc); /* Buffer C Match Ops */ + ctx->magnab = upa_readl(&ffb->magnab); /* Buffer A/B Magnitude Ops */ + ctx->magnc = upa_readl(&ffb->magnc); /* Buffer C Magnitude Ops */ + ctx->pmask = upa_readl(&ffb->pmask); /* RGB Plane Mask */ + ctx->xpmask = upa_readl(&ffb->xpmask); /* X Plane Mask */ + ctx->ypmask = upa_readl(&ffb->ypmask); /* Y Plane Mask */ + 
ctx->zpmask = upa_readl(&ffb->zpmask); /* Z Plane Mask */ + + /* Auxiliary Clips. */ + ctx->auxclip0min = upa_readl(&ffb->auxclip[0].min); + ctx->auxclip0max = upa_readl(&ffb->auxclip[0].max); + ctx->auxclip1min = upa_readl(&ffb->auxclip[1].min); + ctx->auxclip1max = upa_readl(&ffb->auxclip[1].max); + ctx->auxclip2min = upa_readl(&ffb->auxclip[2].min); + ctx->auxclip2max = upa_readl(&ffb->auxclip[2].max); + ctx->auxclip3min = upa_readl(&ffb->auxclip[3].min); + ctx->auxclip3max = upa_readl(&ffb->auxclip[3].max); + + ctx->lpat = upa_readl(&ffb->lpat); /* Line Pattern */ + ctx->fontxy = upa_readl(&ffb->fontxy); /* XY Font Coordinate */ + ctx->fontw = upa_readl(&ffb->fontw); /* Font Width */ + ctx->fontinc = upa_readl(&ffb->fontinc); /* Font X/Y Increment */ + + /* These registers/features only exist on FFB2 and later chips. */ + if (fpriv->ffb_type >= ffb2_prototype) { + ctx->dcss1 = upa_readl(&ffb->dcss1); /* Depth Cue Scale Slope 1 */ + ctx->dcss2 = upa_readl(&ffb->dcss2); /* Depth Cue Scale Slope 2 */ + ctx->dcss3 = upa_readl(&ffb->dcss3); /* Depth Cue Scale Slope 3 */ + ctx->dcs2 = upa_readl(&ffb->dcs2); /* Depth Cue Scale 2 */ + ctx->dcs3 = upa_readl(&ffb->dcs3); /* Depth Cue Scale 3 */ + ctx->dcs4 = upa_readl(&ffb->dcs4); /* Depth Cue Scale 4 */ + ctx->dcd2 = upa_readl(&ffb->dcd2); /* Depth Cue Depth 2 */ + ctx->dcd3 = upa_readl(&ffb->dcd3); /* Depth Cue Depth 3 */ + ctx->dcd4 = upa_readl(&ffb->dcd4); /* Depth Cue Depth 4 */ + + /* And stencil/stencilctl only exists on FFB2+ and later + * due to the introduction of 3DRAM-III. + */ + if (fpriv->ffb_type == ffb2_vertical_plus || + fpriv->ffb_type == ffb2_horizontal_plus) { + ctx->stencil = upa_readl(&ffb->stencil); + ctx->stencilctl = upa_readl(&ffb->stencilctl); + } + } + + /* Save the 32x32 area pattern. */ + for (i = 0; i < 32; i++) + ctx->area_pattern[i] = upa_readl(&ffb->pattern[i]); + + /* Finally, stash away the User Control/Status Register. */ + ctx->ucsr = upa_readl(&ffb->ucsr); +} + +static void ffb_restore_context(ffb_dev_priv_t * fpriv, int old, int idx) +{ + ffb_fbcPtr ffb = fpriv->regs; + struct ffb_hw_context *ctx; + int i; + + ctx = fpriv->hw_state[idx - 1]; + if (idx == 0 || ctx == NULL) + return; + + if (ctx->is_2d_only) { + /* 2D applications only care about certain pieces + * of state. + */ + upa_writel(ctx->drawop, &ffb->drawop); + + /* If we were restoring the vertex registers, this is where + * we would do it. We would restore 32 32-bit words starting + * at ffb->suvtx. + */ + + upa_writel(ctx->ppc, &ffb->ppc); + upa_writel(ctx->wid, &ffb->wid); + upa_writel(ctx->fg, &ffb->fg); + upa_writel(ctx->bg, &ffb->bg); + upa_writel(ctx->xclip, &ffb->xclip); + upa_writel(ctx->fbc, &ffb->fbc); + upa_writel(ctx->rop, &ffb->rop); + upa_writel(ctx->cmp, &ffb->cmp); + upa_writel(ctx->matchab, &ffb->matchab); + upa_writel(ctx->magnab, &ffb->magnab); + upa_writel(ctx->pmask, &ffb->pmask); + upa_writel(ctx->xpmask, &ffb->xpmask); + upa_writel(ctx->lpat, &ffb->lpat); + upa_writel(ctx->fontxy, &ffb->fontxy); + upa_writel(ctx->fontw, &ffb->fontw); + upa_writel(ctx->fontinc, &ffb->fontinc); + + /* stencil/stencilctl only exists on FFB2+ and later + * due to the introduction of 3DRAM-III.
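The same FFB2+ test guards the stencil registers in both the save and restore paths; as a sketch it could be factored into a predicate (hypothetical helper, not in the import):

static inline int ffb_has_3dram3(ffb_dev_priv_t *fpriv)
{
	/* Only the FFB2+ variants carry 3DRAM-III, and with it the
	 * stencil/stencilctl registers. */
	return fpriv->ffb_type == ffb2_vertical_plus ||
	       fpriv->ffb_type == ffb2_horizontal_plus;
}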
+ */ + if (fpriv->ffb_type == ffb2_vertical_plus || + fpriv->ffb_type == ffb2_horizontal_plus) { + upa_writel(ctx->stencil, &ffb->stencil); + upa_writel(ctx->stencilctl, &ffb->stencilctl); + upa_writel(0x80000000, &ffb->fbc); + upa_writel((ctx->stencilctl | 0x80000), + &ffb->rawstencilctl); + upa_writel(ctx->fbc, &ffb->fbc); + } + + for (i = 0; i < 32; i++) + upa_writel(ctx->area_pattern[i], &ffb->pattern[i]); + upa_writel((ctx->ucsr & 0xf0000), &ffb->ucsr); + return; + } + + /* Restore drawop. */ + upa_writel(ctx->drawop, &ffb->drawop); + + /* If we were restoring the vertex registers, this is where + * we would do it. We would restore 32 32-bit words starting + * at ffb->suvtx. + */ + + /* Restore rendering attributes. */ + + upa_writel(ctx->ppc, &ffb->ppc); /* Pixel Processor Control */ + upa_writel(ctx->wid, &ffb->wid); /* Current WID */ + upa_writel(ctx->fg, &ffb->fg); /* Constant FG color */ + upa_writel(ctx->bg, &ffb->bg); /* Constant BG color */ + upa_writel(ctx->consty, &ffb->consty); /* Constant Y */ + upa_writel(ctx->constz, &ffb->constz); /* Constant Z */ + upa_writel(ctx->xclip, &ffb->xclip); /* X plane clip */ + upa_writel(ctx->dcss, &ffb->dcss); /* Depth Cue Scale Slope */ + upa_writel(ctx->vclipmin, &ffb->vclipmin); /* Primary XY clip, minimum */ + upa_writel(ctx->vclipmax, &ffb->vclipmax); /* Primary XY clip, maximum */ + upa_writel(ctx->vclipzmin, &ffb->vclipzmin); /* Primary Z clip, minimum */ + upa_writel(ctx->vclipzmax, &ffb->vclipzmax); /* Primary Z clip, maximum */ + upa_writel(ctx->dcsf, &ffb->dcsf); /* Depth Cue Scale Front Bound */ + upa_writel(ctx->dcsb, &ffb->dcsb); /* Depth Cue Scale Back Bound */ + upa_writel(ctx->dczf, &ffb->dczf); /* Depth Cue Scale Z Front */ + upa_writel(ctx->dczb, &ffb->dczb); /* Depth Cue Scale Z Back */ + upa_writel(ctx->blendc, &ffb->blendc); /* Alpha Blend Control */ + upa_writel(ctx->blendc1, &ffb->blendc1); /* Alpha Blend Color 1 */ + upa_writel(ctx->blendc2, &ffb->blendc2); /* Alpha Blend Color 2 */ + upa_writel(ctx->fbc, &ffb->fbc); /* Frame Buffer Control */ + upa_writel(ctx->rop, &ffb->rop); /* Raster Operation */ + upa_writel(ctx->cmp, &ffb->cmp); /* Compare Controls */ + upa_writel(ctx->matchab, &ffb->matchab); /* Buffer A/B Match Ops */ + upa_writel(ctx->matchc, &ffb->matchc); /* Buffer C Match Ops */ + upa_writel(ctx->magnab, &ffb->magnab); /* Buffer A/B Magnitude Ops */ + upa_writel(ctx->magnc, &ffb->magnc); /* Buffer C Magnitude Ops */ + upa_writel(ctx->pmask, &ffb->pmask); /* RGB Plane Mask */ + upa_writel(ctx->xpmask, &ffb->xpmask); /* X Plane Mask */ + upa_writel(ctx->ypmask, &ffb->ypmask); /* Y Plane Mask */ + upa_writel(ctx->zpmask, &ffb->zpmask); /* Z Plane Mask */ + + /* Auxiliary Clips. */ + upa_writel(ctx->auxclip0min, &ffb->auxclip[0].min); + upa_writel(ctx->auxclip0max, &ffb->auxclip[0].max); + upa_writel(ctx->auxclip1min, &ffb->auxclip[1].min); + upa_writel(ctx->auxclip1max, &ffb->auxclip[1].max); + upa_writel(ctx->auxclip2min, &ffb->auxclip[2].min); + upa_writel(ctx->auxclip2max, &ffb->auxclip[2].max); + upa_writel(ctx->auxclip3min, &ffb->auxclip[3].min); + upa_writel(ctx->auxclip3max, &ffb->auxclip[3].max); + + upa_writel(ctx->lpat, &ffb->lpat); /* Line Pattern */ + upa_writel(ctx->fontxy, &ffb->fontxy); /* XY Font Coordinate */ + upa_writel(ctx->fontw, &ffb->fontw); /* Font Width */ + upa_writel(ctx->fontinc, &ffb->fontinc); /* Font X/Y Increment */ + + /* These registers/features only exist on FFB2 and later chips. 
*/ + if (fpriv->ffb_type >= ffb2_prototype) { + upa_writel(ctx->dcss1, &ffb->dcss1); /* Depth Cue Scale Slope 1 */ + upa_writel(ctx->dcss2, &ffb->dcss2); /* Depth Cue Scale Slope 2 */ + upa_writel(ctx->dcss3, &ffb->dcss3); /* Depth Cue Scale Slope 3 */ + upa_writel(ctx->dcs2, &ffb->dcs2); /* Depth Cue Scale 2 */ + upa_writel(ctx->dcs3, &ffb->dcs3); /* Depth Cue Scale 3 */ + upa_writel(ctx->dcs4, &ffb->dcs4); /* Depth Cue Scale 4 */ + upa_writel(ctx->dcd2, &ffb->dcd2); /* Depth Cue Depth 2 */ + upa_writel(ctx->dcd3, &ffb->dcd3); /* Depth Cue Depth 3 */ + upa_writel(ctx->dcd4, &ffb->dcd4); /* Depth Cue Depth 4 */ + + /* And stencil/stencilctl only exists on FFB2+ and later + * due to the introduction of 3DRAM-III. + */ + if (fpriv->ffb_type == ffb2_vertical_plus || + fpriv->ffb_type == ffb2_horizontal_plus) { + /* Unfortunately, there is a hardware bug on + * the FFB2+ chips which prevents a normal write + * to the stencil control register from working + * as it should. + * + * The state controlled by the FFB stencilctl register + * really gets transferred to the per-buffer instances + * of the stencilctl register in the 3DRAM chips. + * + * The bug is that FFB does not update buffer C correctly, + * so we have to do it by hand for them. + */ + + /* This will update buffers A and B. */ + upa_writel(ctx->stencil, &ffb->stencil); + upa_writel(ctx->stencilctl, &ffb->stencilctl); + + /* Force FFB to use buffer C 3dram regs. */ + upa_writel(0x80000000, &ffb->fbc); + upa_writel((ctx->stencilctl | 0x80000), + &ffb->rawstencilctl); + + /* Now restore the correct FBC controls. */ + upa_writel(ctx->fbc, &ffb->fbc); + } + } + + /* Restore the 32x32 area pattern. */ + for (i = 0; i < 32; i++) + upa_writel(ctx->area_pattern[i], &ffb->pattern[i]); + + /* Finally, restore the User Control/Status Register. + * The only state we really preserve here is the picking + * control.
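The workaround described above is a fixed five-write sequence; factored out, it would look like the sketch below, which mirrors the writes in the restore path exactly (ffb_restore_stencil is a hypothetical name):

static void ffb_restore_stencil(ffb_fbcPtr ffb, struct ffb_hw_context *ctx)
{
	upa_writel(ctx->stencil, &ffb->stencil);	/* updates buffers A and B */
	upa_writel(ctx->stencilctl, &ffb->stencilctl);
	upa_writel(0x80000000, &ffb->fbc);		/* force buffer C 3DRAM regs */
	upa_writel(ctx->stencilctl | 0x80000, &ffb->rawstencilctl);
	upa_writel(ctx->fbc, &ffb->fbc);		/* restore the real FBC controls */
}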
+ */ + upa_writel((ctx->ucsr & 0xf0000), &ffb->ucsr); +} + +#define FFB_UCSR_FB_BUSY 0x01000000 +#define FFB_UCSR_RP_BUSY 0x02000000 +#define FFB_UCSR_ALL_BUSY (FFB_UCSR_RP_BUSY|FFB_UCSR_FB_BUSY) + +static void FFBWait(ffb_fbcPtr ffb) +{ + int limit = 100000; + + do { + u32 regval = upa_readl(&ffb->ucsr); + + if ((regval & FFB_UCSR_ALL_BUSY) == 0) + break; + } while (--limit); +} + +int ffb_context_switch(drm_device_t * dev, int old, int new) { + ffb_dev_priv_t *fpriv = (ffb_dev_priv_t *) dev->dev_private; + +#if DRM_DMA_HISTOGRAM + dev->ctx_start = get_cycles(); +#endif + + DRM_DEBUG("Context switch from %d to %d\n", old, new); + + if (new == dev->last_context || dev->last_context == 0) { + dev->last_context = new; + return 0; + } + + FFBWait(fpriv->regs); + ffb_save_context(fpriv, old); + ffb_restore_context(fpriv, old, new); + FFBWait(fpriv->regs); + + dev->last_context = new; + + return 0; +} + +int ffb_resctx(struct inode * inode, struct file * filp, unsigned int cmd, + unsigned long arg) { + drm_ctx_res_t res; + drm_ctx_t ctx; + int i; + + DRM_DEBUG("%d\n", DRM_RESERVED_CONTEXTS); + if (copy_from_user(&res, (drm_ctx_res_t __user *) arg, sizeof(res))) + return -EFAULT; + if (res.count >= DRM_RESERVED_CONTEXTS) { + memset(&ctx, 0, sizeof(ctx)); + for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) { + ctx.handle = i; + if (copy_to_user(&res.contexts[i], &ctx, sizeof(ctx))) + return -EFAULT; + } + } + res.count = DRM_RESERVED_CONTEXTS; + if (copy_to_user((drm_ctx_res_t __user *) arg, &res, sizeof(res))) + return -EFAULT; + return 0; +} + +int ffb_addctx(struct inode * inode, struct file * filp, unsigned int cmd, + unsigned long arg) { + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_ctx_t ctx; + int idx; + + if (copy_from_user(&ctx, (drm_ctx_t __user *) arg, sizeof(ctx))) + return -EFAULT; + idx = ffb_alloc_queue(dev, (ctx.flags & _DRM_CONTEXT_2DONLY)); + if (idx < 0) + return -ENFILE; + + DRM_DEBUG("%d\n", ctx.handle); + ctx.handle = idx; + if (copy_to_user((drm_ctx_t __user *) arg, &ctx, sizeof(ctx))) + return -EFAULT; + return 0; +} + +int ffb_modctx(struct inode * inode, struct file * filp, unsigned int cmd, + unsigned long arg) { + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + ffb_dev_priv_t *fpriv = (ffb_dev_priv_t *) dev->dev_private; + struct ffb_hw_context *hwctx; + drm_ctx_t ctx; + int idx; + + if (copy_from_user(&ctx, (drm_ctx_t __user *) arg, sizeof(ctx))) + return -EFAULT; + + idx = ctx.handle; + if (idx <= 0 || idx >= FFB_MAX_CTXS) + return -EINVAL; + + hwctx = fpriv->hw_state[idx - 1]; + if (hwctx == NULL) + return -EINVAL; + + if ((ctx.flags & _DRM_CONTEXT_2DONLY) == 0) + hwctx->is_2d_only = 0; + else + hwctx->is_2d_only = 1; + + return 0; +} + +int ffb_getctx(struct inode * inode, struct file * filp, unsigned int cmd, + unsigned long arg) { + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + ffb_dev_priv_t *fpriv = (ffb_dev_priv_t *) dev->dev_private; + struct ffb_hw_context *hwctx; + drm_ctx_t ctx; + int idx; + + if (copy_from_user(&ctx, (drm_ctx_t __user *) arg, sizeof(ctx))) + return -EFAULT; + + idx = ctx.handle; + if (idx <= 0 || idx >= FFB_MAX_CTXS) + return -EINVAL; + + hwctx = fpriv->hw_state[idx - 1]; + if (hwctx == NULL) + return -EINVAL; + + if (hwctx->is_2d_only != 0) + ctx.flags = _DRM_CONTEXT_2DONLY; + else + ctx.flags = 0; + + if (copy_to_user((drm_ctx_t __user *) arg, &ctx, sizeof(ctx))) + return -EFAULT; + + return 0; +} + +int ffb_switchctx(struct inode * inode, struct file * filp,
unsigned int cmd, + unsigned long arg) { + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_ctx_t ctx; + + if (copy_from_user(&ctx, (drm_ctx_t __user *) arg, sizeof(ctx))) + return -EFAULT; + DRM_DEBUG("%d\n", ctx.handle); + return ffb_context_switch(dev, dev->last_context, ctx.handle); +} + +int ffb_newctx(struct inode * inode, struct file * filp, unsigned int cmd, + unsigned long arg) { + drm_ctx_t ctx; + + if (copy_from_user(&ctx, (drm_ctx_t __user *) arg, sizeof(ctx))) + return -EFAULT; + DRM_DEBUG("%d\n", ctx.handle); + + return 0; +} + +int ffb_rmctx(struct inode * inode, struct file * filp, unsigned int cmd, + unsigned long arg) { + drm_ctx_t ctx; + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + ffb_dev_priv_t *fpriv = (ffb_dev_priv_t *) dev->dev_private; + int idx; + + if (copy_from_user(&ctx, (drm_ctx_t __user *) arg, sizeof(ctx))) + return -EFAULT; + DRM_DEBUG("%d\n", ctx.handle); + + idx = ctx.handle - 1; + if (idx < 0 || idx >= FFB_MAX_CTXS) + return -EINVAL; + + if (fpriv->hw_state[idx] != NULL) { + kfree(fpriv->hw_state[idx]); + fpriv->hw_state[idx] = NULL; + } + return 0; +} + +static void ffb_driver_reclaim_buffers_locked(drm_device_t * dev) +{ + ffb_dev_priv_t *fpriv = (ffb_dev_priv_t *) dev->dev_private; + int context = _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock); + int idx; + + idx = context - 1; + if (fpriv && + context != DRM_KERNEL_CONTEXT && fpriv->hw_state[idx] != NULL) { + kfree(fpriv->hw_state[idx]); + fpriv->hw_state[idx] = NULL; + } +} + +static void ffb_driver_lastclose(drm_device_t * dev) +{ + if (dev->dev_private) + kfree(dev->dev_private); +} + +static void ffb_driver_unload(drm_device_t * dev) +{ + if (ffb_position != NULL) + kfree(ffb_position); +} + +static int ffb_driver_kernel_context_switch_unlock(struct drm_device *dev) +{ + dev->lock.filp = 0; + { + __volatile__ unsigned int *plock = &dev->lock.hw_lock->lock; + unsigned int old, new, prev, ctx; + + ctx = DRM_KERNEL_CONTEXT; /* no new holder; hand the lock back */ + do { + old = *plock; + new = ctx; + prev = cmpxchg(plock, old, new); + } while (prev != old); + } + wake_up_interruptible(&dev->lock.lock_queue); + return 0; +} + +unsigned long ffb_driver_get_map_ofs(drm_map_t * map) +{ + return (map->offset & 0xffffffff); +} + +unsigned long ffb_driver_get_reg_ofs(drm_device_t * dev) +{ + ffb_dev_priv_t *ffb_priv = (ffb_dev_priv_t *) dev->dev_private; + + if (ffb_priv) + return ffb_priv->card_phys_base; + + return 0; +} diff --git a/nx-X11/extras/drm/linux-core/ffb_drv.c b/nx-X11/extras/drm/linux-core/ffb_drv.c new file mode 100644 index 000000000..bd5f9f9f8 --- /dev/null +++ b/nx-X11/extras/drm/linux-core/ffb_drv.c @@ -0,0 +1,330 @@ +/* $Id: ffb_drv.c,v 1.1.1.2 2005/10/18 02:24:09 kem Exp $ + * ffb_drv.c: Creator/Creator3D direct rendering driver. + * + * Copyright (C) 2000 David S. Miller (davem@redhat.com) + */ + +#include <linux/config.h> +#include <linux/sched.h> +#include <linux/smp_lock.h> +#include <asm/shmparam.h> +#include <asm/oplib.h> +#include <asm/upa.h> + +#include "drmP.h" +#include "ffb_drv.h" + +#define DRIVER_AUTHOR "David S.
Miller" + +#define DRIVER_NAME "ffb" +#define DRIVER_DESC "Creator/Creator3D" +#define DRIVER_DATE "20000517" + +#define DRIVER_MAJOR 0 +#define DRIVER_MINOR 0 +#define DRIVER_PATCHLEVEL 1 + +typedef struct _ffb_position_t { + int node; + int root; +} ffb_position_t; + +static ffb_position_t *ffb_position; + +static void get_ffb_type(ffb_dev_priv_t *ffb_priv, int instance) +{ + volatile unsigned char *strap_bits; + unsigned char val; + + strap_bits = (volatile unsigned char *) + (ffb_priv->card_phys_base + 0x00200000UL); + + /* Don't ask, you have to read the value twice for whatever + * reason to get correct contents. + */ + val = upa_readb(strap_bits); + val = upa_readb(strap_bits); + switch (val & 0x78) { + case (0x0 << 5) | (0x0 << 3): + ffb_priv->ffb_type = ffb1_prototype; + printk("ffb%d: Detected FFB1 pre-FCS prototype\n", instance); + break; + case (0x0 << 5) | (0x1 << 3): + ffb_priv->ffb_type = ffb1_standard; + printk("ffb%d: Detected FFB1\n", instance); + break; + case (0x0 << 5) | (0x3 << 3): + ffb_priv->ffb_type = ffb1_speedsort; + printk("ffb%d: Detected FFB1-SpeedSort\n", instance); + break; + case (0x1 << 5) | (0x0 << 3): + ffb_priv->ffb_type = ffb2_prototype; + printk("ffb%d: Detected FFB2/vertical pre-FCS prototype\n", instance); + break; + case (0x1 << 5) | (0x1 << 3): + ffb_priv->ffb_type = ffb2_vertical; + printk("ffb%d: Detected FFB2/vertical\n", instance); + break; + case (0x1 << 5) | (0x2 << 3): + ffb_priv->ffb_type = ffb2_vertical_plus; + printk("ffb%d: Detected FFB2+/vertical\n", instance); + break; + case (0x2 << 5) | (0x0 << 3): + ffb_priv->ffb_type = ffb2_horizontal; + printk("ffb%d: Detected FFB2/horizontal\n", instance); + break; + case (0x2 << 5) | (0x2 << 3): + ffb_priv->ffb_type = ffb2_horizontal; + printk("ffb%d: Detected FFB2+/horizontal\n", instance); + break; + default: + ffb_priv->ffb_type = ffb2_vertical; + printk("ffb%d: Unknown boardID[%08x], assuming FFB2\n", instance, val); + break; + }; +} + +static void ffb_apply_upa_parent_ranges(int parent, + struct linux_prom64_registers *regs) +{ + struct linux_prom64_ranges ranges[PROMREG_MAX]; + char name[128]; + int len, i; + + prom_getproperty(parent, "name", name, sizeof(name)); + if (strcmp(name, "upa") != 0) + return; + + len = prom_getproperty(parent, "ranges", (void *) ranges, sizeof(ranges)); + if (len <= 0) + return; + + len /= sizeof(struct linux_prom64_ranges); + for (i = 0; i < len; i++) { + struct linux_prom64_ranges *rng = &ranges[i]; + u64 phys_addr = regs->phys_addr; + + if (phys_addr >= rng->ot_child_base && + phys_addr < (rng->ot_child_base + rng->or_size)) { + regs->phys_addr -= rng->ot_child_base; + regs->phys_addr += rng->ot_parent_base; + return; + } + } + + return; +} + +static int ffb_init_one(drm_device_t *dev, int prom_node, int parent_node, + int instance) +{ + struct linux_prom64_registers regs[2*PROMREG_MAX]; + ffb_dev_priv_t *ffb_priv = (ffb_dev_priv_t *)dev->dev_private; + int i; + + ffb_priv->prom_node = prom_node; + if (prom_getproperty(ffb_priv->prom_node, "reg", + (void *)regs, sizeof(regs)) <= 0) { + return -EINVAL; + } + ffb_apply_upa_parent_ranges(parent_node, ®s[0]); + ffb_priv->card_phys_base = regs[0].phys_addr; + ffb_priv->regs = (ffb_fbcPtr) + (regs[0].phys_addr + 0x00600000UL); + get_ffb_type(ffb_priv, instance); + for (i = 0; i < FFB_MAX_CTXS; i++) + ffb_priv->hw_state[i] = NULL; + + return 0; +} + +static int __init ffb_count_siblings(int root) +{ + int node, child, count = 0; + + child = prom_getchild(root); + for (node = prom_searchsiblings(child, 
"SUNW,ffb"); node; + node = prom_searchsiblings(prom_getsibling(node), "SUNW,ffb")) + count++; + + return count; +} + +static int __init ffb_scan_siblings(int root, int instance) +{ + int node, child; + + child = prom_getchild(root); + for (node = prom_searchsiblings(child, "SUNW,ffb"); node; + node = prom_searchsiblings(prom_getsibling(node), "SUNW,ffb")) { + ffb_position[instance].node = node; + ffb_position[instance].root = root; + instance++; + } + + return instance; +} + +static drm_map_t *ffb_find_map(struct file *filp, unsigned long off) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev; + drm_map_list_t *r_list; + struct list_head *list; + drm_map_t *map; + + if (!priv || (dev = priv->dev) == NULL) + return NULL; + + list_for_each(list, &dev->maplist->head) { + unsigned long uoff; + + r_list = (drm_map_list_t *)list; + map = r_list->map; + if (!map) + continue; + uoff = (map->offset & 0xffffffff); + if (uoff == off) + return map; + } + + return NULL; +} + +unsigned long ffb_get_unmapped_area(struct file *filp, + unsigned long hint, + unsigned long len, + unsigned long pgoff, + unsigned long flags) +{ + drm_map_t *map = ffb_find_map(filp, pgoff << PAGE_SHIFT); + unsigned long addr = -ENOMEM; + + if (!map) + return get_unmapped_area(NULL, hint, len, pgoff, flags); + + if (map->type == _DRM_FRAME_BUFFER || + map->type == _DRM_REGISTERS) { +#ifdef HAVE_ARCH_FB_UNMAPPED_AREA + addr = get_fb_unmapped_area(filp, hint, len, pgoff, flags); +#else + addr = get_unmapped_area(NULL, hint, len, pgoff, flags); +#endif + } else if (map->type == _DRM_SHM && SHMLBA > PAGE_SIZE) { + unsigned long slack = SHMLBA - PAGE_SIZE; + + addr = get_unmapped_area(NULL, hint, len + slack, pgoff, flags); + if (!(addr & ~PAGE_MASK)) { + unsigned long kvirt = (unsigned long) map->handle; + + if ((kvirt & (SHMLBA - 1)) != (addr & (SHMLBA - 1))) { + unsigned long koff, aoff; + + koff = kvirt & (SHMLBA - 1); + aoff = addr & (SHMLBA - 1); + if (koff < aoff) + koff += SHMLBA; + + addr += (koff - aoff); + } + } + } else { + addr = get_unmapped_area(NULL, hint, len, pgoff, flags); + } + + return addr; +} + +/* This functions must be here since it references drm_numdevs) + * which drm_drv.h declares. + */ +static int ffb_driver_firstopen(drm_device_t *dev) +{ + ffb_dev_priv_t *ffb_priv; + drm_device_t *temp_dev; + int ret = 0; + int i; + + /* Check for the case where no device was found. 
*/ + if (ffb_position == NULL) + return -ENODEV; + + /* Find our instance number by finding our device in dev structure */ + for (i = 0; i < drm_numdevs; i++) { + temp_dev = &(drm_device[i]); + if (temp_dev == dev) + break; + } + + if (i == drm_numdevs) + return -ENODEV; + + ffb_priv = kmalloc(sizeof(ffb_dev_priv_t), GFP_KERNEL); + if (!ffb_priv) + return -ENOMEM; + memset(ffb_priv, 0, sizeof(*ffb_priv)); + dev->dev_private = ffb_priv; + + ret = ffb_init_one(dev, + ffb_position[i].node, + ffb_position[i].root, + i); + return ret; +} + +#include "drm_pciids.h" + +static struct pci_device_id pciidlist[] = { + ffb_PCI_IDS +}; + +static struct drm_driver ffb_driver = { + .release = ffb_driver_reclaim_buffers_locked, + .firstopen = ffb_driver_firstopen, + .lastclose = ffb_driver_lastclose, + .unload = ffb_driver_unload, + .kernel_context_switch = ffb_context_switch, + .kernel_context_switch_unlock = ffb_driver_kernel_context_switch_unlock, + .get_map_ofs = ffb_driver_get_map_ofs, + .get_reg_ofs = ffb_driver_get_reg_ofs, + .reclaim_buffers = drm_core_reclaim_buffers, + .fops = { + .owner = THIS_MODULE, + .open = drm_open, + .release = drm_release, + .ioctl = drm_ioctl, + .mmap = drm_mmap, + .fasync = drm_fasync, + .poll = drm_poll, + .get_unmapped_area = ffb_get_unmapped_area, + }, +}; + +static int probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + return drm_probe(pdev, ent, &ffb_driver); +} + +static struct pci_driver pci_driver = { + .name = DRIVER_NAME, + .id_table = pciidlist, + .probe = probe, + .remove = __devexit_p(drm_cleanup_pci), +}; + +static int __init ffb_init(void) +{ + return drm_init(&pci_driver, pciidlist, &ffb_driver); +} + +static void __exit ffb_exit(void) +{ + drm_exit(&pci_driver); +} + +module_init(ffb_init); +module_exit(ffb_exit); + +MODULE_AUTHOR( DRIVER_AUTHOR ); +MODULE_DESCRIPTION( DRIVER_DESC ); +MODULE_LICENSE("GPL and additional rights"); diff --git a/nx-X11/extras/drm/linux-core/ffb_drv.h b/nx-X11/extras/drm/linux-core/ffb_drv.h new file mode 100644 index 000000000..5f3bd3ee6 --- /dev/null +++ b/nx-X11/extras/drm/linux-core/ffb_drv.h @@ -0,0 +1,284 @@ +/* $Id: ffb_drv.h,v 1.1.1.1 2005/06/15 18:31:54 idr Exp $ + * ffb_drv.h: Creator/Creator3D direct rendering driver. + * + * Copyright (C) 2000 David S. Miller (davem@redhat.com) + */ + +/* Auxilliary clips. */ +typedef struct { + volatile unsigned int min; + volatile unsigned int max; +} ffb_auxclip, *ffb_auxclipPtr; + +/* FFB register set. */ +typedef struct _ffb_fbc { + /* Next vertex registers, on the right we list which drawops + * use said register and the logical name the register has in + * that context.
+ */ /* DESCRIPTION DRAWOP(NAME) */ +/*0x00*/unsigned int pad1[3]; /* Reserved */ +/*0x0c*/volatile unsigned int alpha; /* ALPHA Transparency */ +/*0x10*/volatile unsigned int red; /* RED */ +/*0x14*/volatile unsigned int green; /* GREEN */ +/*0x18*/volatile unsigned int blue; /* BLUE */ +/*0x1c*/volatile unsigned int z; /* DEPTH */ +/*0x20*/volatile unsigned int y; /* Y triangle(DOYF) */ + /* aadot(DYF) */ + /* ddline(DYF) */ + /* aaline(DYF) */ +/*0x24*/volatile unsigned int x; /* X triangle(DOXF) */ + /* aadot(DXF) */ + /* ddline(DXF) */ + /* aaline(DXF) */ +/*0x28*/unsigned int pad2[2]; /* Reserved */ +/*0x30*/volatile unsigned int ryf; /* Y (alias to DOYF) ddline(RYF) */ + /* aaline(RYF) */ + /* triangle(RYF) */ +/*0x34*/volatile unsigned int rxf; /* X ddline(RXF) */ + /* aaline(RXF) */ + /* triangle(RXF) */ +/*0x38*/unsigned int pad3[2]; /* Reserved */ +/*0x40*/volatile unsigned int dmyf; /* Y (alias to DOYF) triangle(DMYF) */ +/*0x44*/volatile unsigned int dmxf; /* X triangle(DMXF) */ +/*0x48*/unsigned int pad4[2]; /* Reserved */ +/*0x50*/volatile unsigned int ebyi; /* Y (alias to RYI) polygon(EBYI) */ +/*0x54*/volatile unsigned int ebxi; /* X polygon(EBXI) */ +/*0x58*/unsigned int pad5[2]; /* Reserved */ +/*0x60*/volatile unsigned int by; /* Y brline(RYI) */ + /* fastfill(OP) */ + /* polygon(YI) */ + /* rectangle(YI) */ + /* bcopy(SRCY) */ + /* vscroll(SRCY) */ +/*0x64*/volatile unsigned int bx; /* X brline(RXI) */ + /* polygon(XI) */ + /* rectangle(XI) */ + /* bcopy(SRCX) */ + /* vscroll(SRCX) */ + /* fastfill(GO) */ +/*0x68*/volatile unsigned int dy; /* destination Y fastfill(DSTY) */ + /* bcopy(DSRY) */ + /* vscroll(DSRY) */ +/*0x6c*/volatile unsigned int dx; /* destination X fastfill(DSTX) */ + /* bcopy(DSTX) */ + /* vscroll(DSTX) */ +/*0x70*/volatile unsigned int bh; /* Y (alias to RYI) brline(DYI) */ + /* dot(DYI) */ + /* polygon(ETYI) */ + /* Height fastfill(H) */ + /* bcopy(H) */ + /* vscroll(H) */ + /* Y count fastfill(NY) */ +/*0x74*/volatile unsigned int bw; /* X dot(DXI) */ + /* brline(DXI) */ + /* polygon(ETXI) */ + /* fastfill(W) */ + /* bcopy(W) */ + /* vscroll(W) */ + /* fastfill(NX) */ +/*0x78*/unsigned int pad6[2]; /* Reserved */ +/*0x80*/unsigned int pad7[32]; /* Reserved */ + + /* Setup Unit's vertex state register */ +/*100*/ volatile unsigned int suvtx; +/*104*/ unsigned int pad8[63]; /* Reserved */ + + /* Frame Buffer Control Registers */ +/*200*/ volatile unsigned int ppc; /* Pixel Processor Control */ +/*204*/ volatile unsigned int wid; /* Current WID */ +/*208*/ volatile unsigned int fg; /* FG data */ +/*20c*/ volatile unsigned int bg; /* BG data */ +/*210*/ volatile unsigned int consty; /* Constant Y */ +/*214*/ volatile unsigned int constz; /* Constant Z */ +/*218*/ volatile unsigned int xclip; /* X Clip */ +/*21c*/ volatile unsigned int dcss; /* Depth Cue Scale Slope */ +/*220*/ volatile unsigned int vclipmin; /* Viewclip XY Min Bounds */ +/*224*/ volatile unsigned int vclipmax; /* Viewclip XY Max Bounds */ +/*228*/ volatile unsigned int vclipzmin; /* Viewclip Z Min Bounds */ +/*22c*/ volatile unsigned int vclipzmax; /* Viewclip Z Max Bounds */ +/*230*/ volatile unsigned int dcsf; /* Depth Cue Scale Front Bound */ +/*234*/ volatile unsigned int dcsb; /* Depth Cue Scale Back Bound */ +/*238*/ volatile unsigned int dczf; /* Depth Cue Z Front */ +/*23c*/ volatile unsigned int dczb; /* Depth Cue Z Back */ +/*240*/ unsigned int pad9; /* Reserved */ +/*244*/ volatile unsigned int blendc; /* Alpha Blend Control */ +/*248*/ volatile unsigned int blendc1; /* 
Alpha Blend Color 1 */ +/*24c*/ volatile unsigned int blendc2; /* Alpha Blend Color 2 */ +/*250*/ volatile unsigned int fbramitc; /* FB RAM Interleave Test Control */ +/*254*/ volatile unsigned int fbc; /* Frame Buffer Control */ +/*258*/ volatile unsigned int rop; /* Raster OPeration */ +/*25c*/ volatile unsigned int cmp; /* Frame Buffer Compare */ +/*260*/ volatile unsigned int matchab; /* Buffer AB Match Mask */ +/*264*/ volatile unsigned int matchc; /* Buffer C(YZ) Match Mask */ +/*268*/ volatile unsigned int magnab; /* Buffer AB Magnitude Mask */ +/*26c*/ volatile unsigned int magnc; /* Buffer C(YZ) Magnitude Mask */ +/*270*/ volatile unsigned int fbcfg0; /* Frame Buffer Config 0 */ +/*274*/ volatile unsigned int fbcfg1; /* Frame Buffer Config 1 */ +/*278*/ volatile unsigned int fbcfg2; /* Frame Buffer Config 2 */ +/*27c*/ volatile unsigned int fbcfg3; /* Frame Buffer Config 3 */ +/*280*/ volatile unsigned int ppcfg; /* Pixel Processor Config */ +/*284*/ volatile unsigned int pick; /* Picking Control */ +/*288*/ volatile unsigned int fillmode; /* FillMode */ +/*28c*/ volatile unsigned int fbramwac; /* FB RAM Write Address Control */ +/*290*/ volatile unsigned int pmask; /* RGB PlaneMask */ +/*294*/ volatile unsigned int xpmask; /* X PlaneMask */ +/*298*/ volatile unsigned int ypmask; /* Y PlaneMask */ +/*29c*/ volatile unsigned int zpmask; /* Z PlaneMask */ +/*2a0*/ ffb_auxclip auxclip[4]; /* Auxilliary Viewport Clip */ + + /* New 3dRAM III support regs */ +/*2c0*/ volatile unsigned int rawblend2; +/*2c4*/ volatile unsigned int rawpreblend; +/*2c8*/ volatile unsigned int rawstencil; +/*2cc*/ volatile unsigned int rawstencilctl; +/*2d0*/ volatile unsigned int threedram1; +/*2d4*/ volatile unsigned int threedram2; +/*2d8*/ volatile unsigned int passin; +/*2dc*/ volatile unsigned int rawclrdepth; +/*2e0*/ volatile unsigned int rawpmask; +/*2e4*/ volatile unsigned int rawcsrc; +/*2e8*/ volatile unsigned int rawmatch; +/*2ec*/ volatile unsigned int rawmagn; +/*2f0*/ volatile unsigned int rawropblend; +/*2f4*/ volatile unsigned int rawcmp; +/*2f8*/ volatile unsigned int rawwac; +/*2fc*/ volatile unsigned int fbramid; + +/*300*/ volatile unsigned int drawop; /* Draw OPeration */ +/*304*/ unsigned int pad10[2]; /* Reserved */ +/*30c*/ volatile unsigned int lpat; /* Line Pattern control */ +/*310*/ unsigned int pad11; /* Reserved */ +/*314*/ volatile unsigned int fontxy; /* XY Font coordinate */ +/*318*/ volatile unsigned int fontw; /* Font Width */ +/*31c*/ volatile unsigned int fontinc; /* Font Increment */ +/*320*/ volatile unsigned int font; /* Font bits */ +/*324*/ unsigned int pad12[3]; /* Reserved */ +/*330*/ volatile unsigned int blend2; +/*334*/ volatile unsigned int preblend; +/*338*/ volatile unsigned int stencil; +/*33c*/ volatile unsigned int stencilctl; + +/*340*/ unsigned int pad13[4]; /* Reserved */ +/*350*/ volatile unsigned int dcss1; /* Depth Cue Scale Slope 1 */ +/*354*/ volatile unsigned int dcss2; /* Depth Cue Scale Slope 2 */ +/*358*/ volatile unsigned int dcss3; /* Depth Cue Scale Slope 3 */ +/*35c*/ volatile unsigned int widpmask; +/*360*/ volatile unsigned int dcs2; +/*364*/ volatile unsigned int dcs3; +/*368*/ volatile unsigned int dcs4; +/*36c*/ unsigned int pad14; /* Reserved */ +/*370*/ volatile unsigned int dcd2; +/*374*/ volatile unsigned int dcd3; +/*378*/ volatile unsigned int dcd4; +/*37c*/ unsigned int pad15; /* Reserved */ +/*380*/ volatile unsigned int pattern[32]; /* area Pattern */ +/*400*/ unsigned int pad16[8]; /* Reserved */ +/*420*/ volatile unsigned 
int reset; /* chip RESET */ +/*424*/ unsigned int pad17[247]; /* Reserved */ +/*800*/ volatile unsigned int devid; /* Device ID */ +/*804*/ unsigned int pad18[63]; /* Reserved */ +/*900*/ volatile unsigned int ucsr; /* User Control & Status Register */ +/*904*/ unsigned int pad19[31]; /* Reserved */ +/*980*/ volatile unsigned int mer; /* Mode Enable Register */ +/*984*/ unsigned int pad20[1439]; /* Reserved */ +} ffb_fbc, *ffb_fbcPtr; + +struct ffb_hw_context { + int is_2d_only; + + unsigned int ppc; + unsigned int wid; + unsigned int fg; + unsigned int bg; + unsigned int consty; + unsigned int constz; + unsigned int xclip; + unsigned int dcss; + unsigned int vclipmin; + unsigned int vclipmax; + unsigned int vclipzmin; + unsigned int vclipzmax; + unsigned int dcsf; + unsigned int dcsb; + unsigned int dczf; + unsigned int dczb; + unsigned int blendc; + unsigned int blendc1; + unsigned int blendc2; + unsigned int fbc; + unsigned int rop; + unsigned int cmp; + unsigned int matchab; + unsigned int matchc; + unsigned int magnab; + unsigned int magnc; + unsigned int pmask; + unsigned int xpmask; + unsigned int ypmask; + unsigned int zpmask; + unsigned int auxclip0min; + unsigned int auxclip0max; + unsigned int auxclip1min; + unsigned int auxclip1max; + unsigned int auxclip2min; + unsigned int auxclip2max; + unsigned int auxclip3min; + unsigned int auxclip3max; + unsigned int drawop; + unsigned int lpat; + unsigned int fontxy; + unsigned int fontw; + unsigned int fontinc; + unsigned int area_pattern[32]; + unsigned int ucsr; + unsigned int stencil; + unsigned int stencilctl; + unsigned int dcss1; + unsigned int dcss2; + unsigned int dcss3; + unsigned int dcs2; + unsigned int dcs3; + unsigned int dcs4; + unsigned int dcd2; + unsigned int dcd3; + unsigned int dcd4; + unsigned int mer; +}; + +#define FFB_MAX_CTXS 32 + +enum ffb_chip_type { + ffb1_prototype = 0, /* Early pre-FCS FFB */ + ffb1_standard, /* First FCS FFB, 100MHz UPA, 66MHz gclk */ + ffb1_speedsort, /* Second FCS FFB, 100MHz UPA, 75MHz gclk */ + ffb2_prototype, /* Early pre-FCS vertical FFB2 */ + ffb2_vertical, /* First FCS FFB2/vertical, 100MHz UPA, 100MHz gclk, + 75(SingleBuffer)/83(DoubleBuffer) MHz fclk */ + ffb2_vertical_plus, /* Second FCS FFB2/vertical, same timings */ + ffb2_horizontal, /* First FCS FFB2/horizontal, same timings as FFB2/vert */ + ffb2_horizontal_plus, /* Second FCS FFB2/horizontal, same timings */ + afb_m3, /* FCS Elite3D, 3 float chips */ + afb_m6 /* FCS Elite3D, 6 float chips */ +}; + +typedef struct ffb_dev_priv { + /* Misc software state. */ + int prom_node; + enum ffb_chip_type ffb_type; + u64 card_phys_base; + struct miscdevice miscdev; + + /* Controller registers. */ + ffb_fbcPtr regs; + + /* Context table. */ + struct ffb_hw_context *hw_state[FFB_MAX_CTXS]; +} ffb_dev_priv_t; + +extern unsigned long ffb_get_unmapped_area(struct file *filp, + unsigned long hint, + unsigned long len, + unsigned long pgoff, + unsigned long flags); +extern unsigned long ffb_driver_get_map_ofs(drm_map_t *map); +extern unsigned long ffb_driver_get_reg_ofs(drm_device_t *dev); diff --git a/nx-X11/extras/drm/linux-core/i810_dma.c b/nx-X11/extras/drm/linux-core/i810_dma.c new file mode 100644 index 000000000..d91b66549 --- /dev/null +++ b/nx-X11/extras/drm/linux-core/i810_dma.c @@ -0,0 +1,1417 @@ +/* i810_dma.c -- DMA support for the i810 -*- linux-c -*- + * Created: Mon Dec 13 01:50:01 1999 by jhartmann@precisioninsight.com + * + * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * + * Authors: Rickard E. (Rik) Faith <faith@valinux.com> + * Jeff Hartmann <jhartmann@valinux.com> + * Keith Whitwell <keith@tungstengraphics.com> + * + */ + +#include <linux/interrupt.h> /* For task queue support */ +#include <linux/delay.h> +#include <linux/pagemap.h> + +#include "drmP.h" +#include "drm.h" +#include "i810_drm.h" +#include "i810_drv.h" + +#ifdef DO_MUNMAP_4_ARGS +#define DO_MUNMAP(m, a, l) do_munmap(m, a, l, 1) +#else +#define DO_MUNMAP(m, a, l) do_munmap(m, a, l) +#endif + +#define I810_BUF_FREE 2 +#define I810_BUF_CLIENT 1 +#define I810_BUF_HARDWARE 0 + +#define I810_BUF_UNMAPPED 0 +#define I810_BUF_MAPPED 1 + +static inline void i810_print_status_page(drm_device_t * dev) +{ + drm_device_dma_t *dma = dev->dma; + drm_i810_private_t *dev_priv = dev->dev_private; + u32 *temp = dev_priv->hw_status_page; + int i; + + DRM_DEBUG("hw_status: Interrupt Status : %x\n", temp[0]); + DRM_DEBUG("hw_status: LpRing Head ptr : %x\n", temp[1]); + DRM_DEBUG("hw_status: IRing Head ptr : %x\n", temp[2]); + DRM_DEBUG("hw_status: Reserved : %x\n", temp[3]); + DRM_DEBUG("hw_status: Last Render: %x\n", temp[4]); + DRM_DEBUG("hw_status: Driver Counter : %d\n", temp[5]); + for (i = 6; i < dma->buf_count + 6; i++) { + DRM_DEBUG("buffer status idx : %d used: %d\n", i - 6, temp[i]); + } +} + +static drm_buf_t *i810_freelist_get(drm_device_t * dev) +{ + drm_device_dma_t *dma = dev->dma; + int i; + int used; + + /* Linear search might not be the best solution */ + + for (i = 0; i < dma->buf_count; i++) { + drm_buf_t *buf = dma->buflist[i]; + drm_i810_buf_priv_t *buf_priv = buf->dev_private; + /* In use is already a pointer */ + used = cmpxchg(buf_priv->in_use, I810_BUF_FREE, + I810_BUF_CLIENT); + if (used == I810_BUF_FREE) { + return buf; + } + } + return NULL; +} + +/* This should only be called if the buffer is not sent to the hardware + * yet, the hardware updates in use for us once its on the ring buffer. 
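The in_use word manipulated with cmpxchg() here is the heart of a small buffer-ownership protocol used throughout this file; the summary below is editorial, grounded in i810_freelist_get/put, the dispatch paths, and i810_flush_queue further down:

/* *buf_priv->in_use transitions, always via cmpxchg():
 *
 *   I810_BUF_FREE     -- i810_freelist_get() -->           I810_BUF_CLIENT
 *   I810_BUF_CLIENT   -- dispatch_vertex (discard) -->     I810_BUF_HARDWARE
 *   I810_BUF_HARDWARE -- CMD_STORE_DWORD_IDX from ring --> I810_BUF_FREE
 *
 * i810_freelist_put() is the CLIENT -> FREE shortcut for buffers that
 * were never handed to the ring.
 */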
+ */ + +static int i810_freelist_put(drm_device_t * dev, drm_buf_t * buf) +{ + drm_i810_buf_priv_t *buf_priv = buf->dev_private; + int used; + + /* In use is already a pointer */ + used = cmpxchg(buf_priv->in_use, I810_BUF_CLIENT, I810_BUF_FREE); + if (used != I810_BUF_CLIENT) { + DRM_ERROR("Freeing buffer thats not in use : %d\n", buf->idx); + return -EINVAL; + } + + return 0; +} + +static int i810_mmap_buffers(struct file *filp, struct vm_area_struct *vma) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev; + drm_i810_private_t *dev_priv; + drm_buf_t *buf; + drm_i810_buf_priv_t *buf_priv; + + lock_kernel(); + dev = priv->head->dev; + dev_priv = dev->dev_private; + buf = dev_priv->mmap_buffer; + buf_priv = buf->dev_private; + + vma->vm_flags |= (VM_IO | VM_DONTCOPY); + vma->vm_file = filp; + + buf_priv->currently_mapped = I810_BUF_MAPPED; + unlock_kernel(); + + if (remap_pfn_range(vma, vma->vm_start, + VM_OFFSET(vma) >> PAGE_SHIFT, + vma->vm_end - vma->vm_start, + vma->vm_page_prot)) + return -EAGAIN; + return 0; +} + +static struct file_operations i810_buffer_fops = { + .open = drm_open, + .release = drm_release, + .ioctl = drm_ioctl, + .mmap = i810_mmap_buffers, + .fasync = drm_fasync, +}; + +static int i810_map_buffer(drm_buf_t * buf, struct file *filp) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->head->dev; + drm_i810_buf_priv_t *buf_priv = buf->dev_private; + drm_i810_private_t *dev_priv = dev->dev_private; + struct file_operations *old_fops; + int retcode = 0; + + if (buf_priv->currently_mapped == I810_BUF_MAPPED) + return -EINVAL; + + down_write(¤t->mm->mmap_sem); + old_fops = filp->f_op; + filp->f_op = &i810_buffer_fops; + dev_priv->mmap_buffer = buf; + buf_priv->virtual = (void *)do_mmap(filp, 0, buf->total, + PROT_READ | PROT_WRITE, + MAP_SHARED, buf->bus_address); + dev_priv->mmap_buffer = NULL; + filp->f_op = old_fops; + if ((unsigned long)buf_priv->virtual > -1024UL) { + /* Real error */ + DRM_ERROR("mmap error\n"); + retcode = (signed int)buf_priv->virtual; + buf_priv->virtual = NULL; + } + up_write(¤t->mm->mmap_sem); + + return retcode; +} + +static int i810_unmap_buffer(drm_buf_t * buf) +{ + drm_i810_buf_priv_t *buf_priv = buf->dev_private; + int retcode = 0; + + if (buf_priv->currently_mapped != I810_BUF_MAPPED) + return -EINVAL; + + down_write(¤t->mm->mmap_sem); + retcode = DO_MUNMAP(current->mm, + (unsigned long)buf_priv->virtual, + (size_t) buf->total); + up_write(¤t->mm->mmap_sem); + + buf_priv->currently_mapped = I810_BUF_UNMAPPED; + buf_priv->virtual = NULL; + + return retcode; +} + +static int i810_dma_get_buffer(drm_device_t * dev, drm_i810_dma_t * d, + struct file *filp) +{ + drm_buf_t *buf; + drm_i810_buf_priv_t *buf_priv; + int retcode = 0; + + buf = i810_freelist_get(dev); + if (!buf) { + retcode = -ENOMEM; + DRM_DEBUG("retcode=%d\n", retcode); + return retcode; + } + + retcode = i810_map_buffer(buf, filp); + if (retcode) { + i810_freelist_put(dev, buf); + DRM_ERROR("mapbuf failed, retcode %d\n", retcode); + return retcode; + } + buf->filp = filp; + buf_priv = buf->dev_private; + d->granted = 1; + d->request_idx = buf->idx; + d->request_size = buf->total; + d->virtual = buf_priv->virtual; + + return retcode; +} + +static int i810_dma_cleanup(drm_device_t * dev) +{ + drm_device_dma_t *dma = dev->dma; + + /* Make sure interrupts are disabled here because the uninstall ioctl + * may not have been called from userspace and after dev_private + * is freed, it's too late. 
+ */ + if (drm_core_check_feature(dev, DRIVER_HAVE_IRQ) && dev->irq_enabled) + drm_irq_uninstall(dev); + + if (dev->dev_private) { + int i; + drm_i810_private_t *dev_priv = + (drm_i810_private_t *) dev->dev_private; + + if (dev_priv->ring.virtual_start) { + drm_ioremapfree((void *)dev_priv->ring.virtual_start, + dev_priv->ring.Size, dev); + } + if (dev_priv->hw_status_page) { + pci_free_consistent(dev->pdev, PAGE_SIZE, + dev_priv->hw_status_page, + dev_priv->dma_status_page); + /* Need to rewrite hardware status page */ + I810_WRITE(0x02080, 0x1ffff000); + } + drm_free(dev->dev_private, sizeof(drm_i810_private_t), + DRM_MEM_DRIVER); + dev->dev_private = NULL; + + for (i = 0; i < dma->buf_count; i++) { + drm_buf_t *buf = dma->buflist[i]; + drm_i810_buf_priv_t *buf_priv = buf->dev_private; + if (buf_priv->kernel_virtual && buf->total) + drm_ioremapfree(buf_priv->kernel_virtual, + buf->total, dev); + } + } + return 0; +} + +static int i810_wait_ring(drm_device_t * dev, int n) +{ + drm_i810_private_t *dev_priv = dev->dev_private; + drm_i810_ring_buffer_t *ring = &(dev_priv->ring); + int iters = 0; + unsigned long end; + unsigned int last_head = I810_READ(LP_RING + RING_HEAD) & HEAD_ADDR; + + end = jiffies + (HZ * 3); + while (ring->space < n) { + ring->head = I810_READ(LP_RING + RING_HEAD) & HEAD_ADDR; + ring->space = ring->head - (ring->tail + 8); + if (ring->space < 0) + ring->space += ring->Size; + + if (ring->head != last_head) { + end = jiffies + (HZ * 3); + last_head = ring->head; + } + + iters++; + if (time_before(end, jiffies)) { + DRM_ERROR("space: %d wanted %d\n", ring->space, n); + DRM_ERROR("lockup\n"); + goto out_wait_ring; + } + udelay(1); + } + + out_wait_ring: + return iters; +} + +static void i810_kernel_lost_context(drm_device_t * dev) +{ + drm_i810_private_t *dev_priv = dev->dev_private; + drm_i810_ring_buffer_t *ring = &(dev_priv->ring); + + ring->head = I810_READ(LP_RING + RING_HEAD) & HEAD_ADDR; + ring->tail = I810_READ(LP_RING + RING_TAIL); + ring->space = ring->head - (ring->tail + 8); + if (ring->space < 0) + ring->space += ring->Size; +} + +static int i810_freelist_init(drm_device_t * dev, drm_i810_private_t * dev_priv) +{ + drm_device_dma_t *dma = dev->dma; + int my_idx = 24; + u32 *hw_status = (u32 *) (dev_priv->hw_status_page + my_idx); + int i; + + if (dma->buf_count > 1019) { + /* Not enough space in the status page for the freelist */ + return -EINVAL; + } + + for (i = 0; i < dma->buf_count; i++) { + drm_buf_t *buf = dma->buflist[i]; + drm_i810_buf_priv_t *buf_priv = buf->dev_private; + + buf_priv->in_use = hw_status++; + buf_priv->my_use_idx = my_idx; + my_idx += 4; + + *buf_priv->in_use = I810_BUF_FREE; + + buf_priv->kernel_virtual = drm_ioremap(buf->bus_address, + buf->total, dev); + } + return 0; +} + +static int i810_dma_initialize(drm_device_t * dev, + drm_i810_private_t * dev_priv, + drm_i810_init_t * init) +{ + struct list_head *list; + + memset(dev_priv, 0, sizeof(drm_i810_private_t)); + + list_for_each(list, &dev->maplist->head) { + drm_map_list_t *r_list = list_entry(list, drm_map_list_t, head); + if (r_list->map && + r_list->map->type == _DRM_SHM && + r_list->map->flags & _DRM_CONTAINS_LOCK) { + dev_priv->sarea_map = r_list->map; + break; + } + } + if (!dev_priv->sarea_map) { + dev->dev_private = (void *)dev_priv; + i810_dma_cleanup(dev); + DRM_ERROR("can not find sarea!\n"); + return -EINVAL; + } + dev_priv->mmio_map = drm_core_findmap(dev, init->mmio_offset); + if (!dev_priv->mmio_map) { + dev->dev_private = (void *)dev_priv; + 
i810_dma_cleanup(dev); + DRM_ERROR("can not find mmio map!\n"); + return -EINVAL; + } + dev->agp_buffer_token = init->buffers_offset; + dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset); + if (!dev->agp_buffer_map) { + dev->dev_private = (void *)dev_priv; + i810_dma_cleanup(dev); + DRM_ERROR("can not find dma buffer map!\n"); + return -EINVAL; + } + + dev_priv->sarea_priv = (drm_i810_sarea_t *) + ((u8 *) dev_priv->sarea_map->handle + init->sarea_priv_offset); + + dev_priv->ring.Start = init->ring_start; + dev_priv->ring.End = init->ring_end; + dev_priv->ring.Size = init->ring_size; + + dev_priv->ring.virtual_start = drm_ioremap(dev->agp->base + + init->ring_start, + init->ring_size, dev); + + if (dev_priv->ring.virtual_start == NULL) { + dev->dev_private = (void *)dev_priv; + i810_dma_cleanup(dev); + DRM_ERROR("can not ioremap virtual address for" + " ring buffer\n"); + return -ENOMEM; + } + + dev_priv->ring.tail_mask = dev_priv->ring.Size - 1; + + dev_priv->w = init->w; + dev_priv->h = init->h; + dev_priv->pitch = init->pitch; + dev_priv->back_offset = init->back_offset; + dev_priv->depth_offset = init->depth_offset; + dev_priv->front_offset = init->front_offset; + + dev_priv->overlay_offset = init->overlay_offset; + dev_priv->overlay_physical = init->overlay_physical; + + dev_priv->front_di1 = init->front_offset | init->pitch_bits; + dev_priv->back_di1 = init->back_offset | init->pitch_bits; + dev_priv->zi1 = init->depth_offset | init->pitch_bits; + + /* Program Hardware Status Page */ + dev_priv->hw_status_page = + pci_alloc_consistent(dev->pdev, PAGE_SIZE, + &dev_priv->dma_status_page); + if (!dev_priv->hw_status_page) { + dev->dev_private = (void *)dev_priv; + i810_dma_cleanup(dev); + DRM_ERROR("Can not allocate hardware status page\n"); + return -ENOMEM; + } + memset(dev_priv->hw_status_page, 0, PAGE_SIZE); + DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page); + + I810_WRITE(0x02080, dev_priv->dma_status_page); + DRM_DEBUG("Enabled hardware status page\n"); + + /* Now we need to init our freelist */ + if (i810_freelist_init(dev, dev_priv) != 0) { + dev->dev_private = (void *)dev_priv; + i810_dma_cleanup(dev); + DRM_ERROR("Not enough space in the status page for" + " the freelist\n"); + return -ENOMEM; + } + dev->dev_private = (void *)dev_priv; + + return 0; +} + +/* i810 DRM version 1.1 used a smaller init structure with different + * ordering of values than is currently used (drm >= 1.2). There is + * no defined way to detect the XFree version to correct this problem, + * however by checking using this procedure we can detect the correct + * thing to do. + * + * #1 Read the Smaller init structure from user-space + * #2 Verify the overlay_physical is a valid physical address, or NULL + * If it isn't then we have a v1.1 client. Fix up params. + * If it is, then we have a 1.2 client... get the rest of the data. 
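Restated as a table, the detection keys entirely off the overlay_physical word as read through the smaller v1.1 layout (an editorial restatement of i810_dma_init_compat() below):

/* overlay_physical == 0     -> v1.2 client: re-read the full v1.2 struct
 * overlay_physical >  4096  -> v1.2 client: re-read the full v1.2 struct
 * otherwise (1..4096)       -> v1.1 client: the fields were read shifted,
 *                              so shuffle them (h -> pitch_bits, w -> pitch,
 *                              overlay_physical -> h, overlay_offset -> w).
 */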
+ */ +static int i810_dma_init_compat(drm_i810_init_t * init, unsigned long arg) +{ + + /* Get v1.1 init data */ + if (copy_from_user(init, (drm_i810_pre12_init_t __user *) arg, + sizeof(drm_i810_pre12_init_t))) { + return -EFAULT; + } + + if ((!init->overlay_physical) || (init->overlay_physical > 4096)) { + + /* This is a v1.2 client, just get the v1.2 init data */ + DRM_INFO("Using POST v1.2 init.\n"); + if (copy_from_user(init, (drm_i810_init_t __user *) arg, + sizeof(drm_i810_init_t))) { + return -EFAULT; + } + } else { + + /* This is a v1.1 client, fix the params */ + DRM_INFO("Using PRE v1.2 init.\n"); + init->pitch_bits = init->h; + init->pitch = init->w; + init->h = init->overlay_physical; + init->w = init->overlay_offset; + init->overlay_physical = 0; + init->overlay_offset = 0; + } + + return 0; +} + +static int i810_dma_init(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->head->dev; + drm_i810_private_t *dev_priv; + drm_i810_init_t init; + int retcode = 0; + + /* Get only the init func */ + if (copy_from_user + (&init, (void __user *)arg, sizeof(drm_i810_init_func_t))) + return -EFAULT; + + switch (init.func) { + case I810_INIT_DMA: + /* This case is for backward compatibility. It + * handles XFree 4.1.0 and 4.2.0, and has to + * do some parameter checking as described below. + * It will someday go away. + */ + retcode = i810_dma_init_compat(&init, arg); + if (retcode) + return retcode; + + dev_priv = drm_alloc(sizeof(drm_i810_private_t), + DRM_MEM_DRIVER); + if (dev_priv == NULL) + return -ENOMEM; + retcode = i810_dma_initialize(dev, dev_priv, &init); + break; + + default: + case I810_INIT_DMA_1_4: + DRM_INFO("Using v1.4 init.\n"); + if (copy_from_user(&init, (drm_i810_init_t __user *) arg, + sizeof(drm_i810_init_t))) { + return -EFAULT; + } + dev_priv = drm_alloc(sizeof(drm_i810_private_t), + DRM_MEM_DRIVER); + if (dev_priv == NULL) + return -ENOMEM; + retcode = i810_dma_initialize(dev, dev_priv, &init); + break; + + case I810_CLEANUP_DMA: + DRM_INFO("DMA Cleanup\n"); + retcode = i810_dma_cleanup(dev); + break; + } + + return retcode; +} + +/* Most efficient way to verify state for the i810 is as it is + * emitted. Non-conformant state is silently dropped. + * + * Use 'volatile' & local var tmp to force the emitted values to be + * identical to the verified ones. 
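The acceptance test applied to every state word in the three *Verified emitters below can be read straight off the masks; a sketch of the predicate (hypothetical helper, not in the imported file):

static inline int i810_state_word_allowed(unsigned int tmp)
{
	/* Bits 31:29 must read 3 and the 5-bit opcode in bits 28:24 must
	 * stay below 0x1d; any other word is silently dropped. */
	return (tmp & (7 << 29)) == (3 << 29) &&
	       (tmp & (0x1f << 24)) < (0x1d << 24);
}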
*/ +static void i810EmitContextVerified(drm_device_t * dev, + volatile unsigned int *code) +{ + drm_i810_private_t *dev_priv = dev->dev_private; + int i, j = 0; + unsigned int tmp; + RING_LOCALS; + + BEGIN_LP_RING(I810_CTX_SETUP_SIZE); + + OUT_RING(GFX_OP_COLOR_FACTOR); + OUT_RING(code[I810_CTXREG_CF1]); + + OUT_RING(GFX_OP_STIPPLE); + OUT_RING(code[I810_CTXREG_ST1]); + + for (i = 4; i < I810_CTX_SETUP_SIZE; i++) { + tmp = code[i]; + + if ((tmp & (7 << 29)) == (3 << 29) && + (tmp & (0x1f << 24)) < (0x1d << 24)) { + OUT_RING(tmp); + j++; + } else + printk("context state dropped!!!\n"); + } + + if (j & 1) + OUT_RING(0); + + ADVANCE_LP_RING(); +} + +static void i810EmitTexVerified(drm_device_t * dev, volatile unsigned int *code) +{ + drm_i810_private_t *dev_priv = dev->dev_private; + int i, j = 0; + unsigned int tmp; + RING_LOCALS; + + BEGIN_LP_RING(I810_TEX_SETUP_SIZE); + + OUT_RING(GFX_OP_MAP_INFO); + OUT_RING(code[I810_TEXREG_MI1]); + OUT_RING(code[I810_TEXREG_MI2]); + OUT_RING(code[I810_TEXREG_MI3]); + + for (i = 4; i < I810_TEX_SETUP_SIZE; i++) { + tmp = code[i]; + + if ((tmp & (7 << 29)) == (3 << 29) && + (tmp & (0x1f << 24)) < (0x1d << 24)) { + OUT_RING(tmp); + j++; + } else + printk("texture state dropped!!!\n"); + } + + if (j & 1) + OUT_RING(0); + + ADVANCE_LP_RING(); +} + +/* Need to do some additional checking when setting the dest buffer. + */ +static void i810EmitDestVerified(drm_device_t * dev, + volatile unsigned int *code) +{ + drm_i810_private_t *dev_priv = dev->dev_private; + unsigned int tmp; + RING_LOCALS; + + BEGIN_LP_RING(I810_DEST_SETUP_SIZE + 2); + + tmp = code[I810_DESTREG_DI1]; + if (tmp == dev_priv->front_di1 || tmp == dev_priv->back_di1) { + OUT_RING(CMD_OP_DESTBUFFER_INFO); + OUT_RING(tmp); + } else + DRM_DEBUG("bad di1 %x (allow %x or %x)\n", + tmp, dev_priv->front_di1, dev_priv->back_di1); + + /* invariant: + */ + OUT_RING(CMD_OP_Z_BUFFER_INFO); + OUT_RING(dev_priv->zi1); + + OUT_RING(GFX_OP_DESTBUFFER_VARS); + OUT_RING(code[I810_DESTREG_DV1]); + + OUT_RING(GFX_OP_DRAWRECT_INFO); + OUT_RING(code[I810_DESTREG_DR1]); + OUT_RING(code[I810_DESTREG_DR2]); + OUT_RING(code[I810_DESTREG_DR3]); + OUT_RING(code[I810_DESTREG_DR4]); + OUT_RING(0); + + ADVANCE_LP_RING(); +} + +static void i810EmitState(drm_device_t * dev) +{ + drm_i810_private_t *dev_priv = dev->dev_private; + drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv; + unsigned int dirty = sarea_priv->dirty; + + DRM_DEBUG("%s %x\n", __FUNCTION__, dirty); + + if (dirty & I810_UPLOAD_BUFFERS) { + i810EmitDestVerified(dev, sarea_priv->BufferState); + sarea_priv->dirty &= ~I810_UPLOAD_BUFFERS; + } + + if (dirty & I810_UPLOAD_CTX) { + i810EmitContextVerified(dev, sarea_priv->ContextState); + sarea_priv->dirty &= ~I810_UPLOAD_CTX; + } + + if (dirty & I810_UPLOAD_TEX0) { + i810EmitTexVerified(dev, sarea_priv->TexState[0]); + sarea_priv->dirty &= ~I810_UPLOAD_TEX0; + } + + if (dirty & I810_UPLOAD_TEX1) { + i810EmitTexVerified(dev, sarea_priv->TexState[1]); + sarea_priv->dirty &= ~I810_UPLOAD_TEX1; + } +} + +/* need to verify + */ +static void i810_dma_dispatch_clear(drm_device_t * dev, int flags, + unsigned int clear_color, + unsigned int clear_zval) +{ + drm_i810_private_t *dev_priv = dev->dev_private; + drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv; + int nbox = sarea_priv->nbox; + drm_clip_rect_t *pbox = sarea_priv->boxes; + int pitch = dev_priv->pitch; + int cpp = 2; + int i; + RING_LOCALS; + + if (dev_priv->current_page == 1) { + unsigned int tmp = flags; + + flags &= ~(I810_FRONT | I810_BACK); + if (tmp &
I810_FRONT) + flags |= I810_BACK; + if (tmp & I810_BACK) + flags |= I810_FRONT; + } + + i810_kernel_lost_context(dev); + + if (nbox > I810_NR_SAREA_CLIPRECTS) + nbox = I810_NR_SAREA_CLIPRECTS; + + for (i = 0; i < nbox; i++, pbox++) { + unsigned int x = pbox->x1; + unsigned int y = pbox->y1; + unsigned int width = (pbox->x2 - x) * cpp; + unsigned int height = pbox->y2 - y; + unsigned int start = y * pitch + x * cpp; + + if (pbox->x1 > pbox->x2 || + pbox->y1 > pbox->y2 || + pbox->x2 > dev_priv->w || pbox->y2 > dev_priv->h) + continue; + + if (flags & I810_FRONT) { + BEGIN_LP_RING(6); + OUT_RING(BR00_BITBLT_CLIENT | BR00_OP_COLOR_BLT | 0x3); + OUT_RING(BR13_SOLID_PATTERN | (0xF0 << 16) | pitch); + OUT_RING((height << 16) | width); + OUT_RING(start); + OUT_RING(clear_color); + OUT_RING(0); + ADVANCE_LP_RING(); + } + + if (flags & I810_BACK) { + BEGIN_LP_RING(6); + OUT_RING(BR00_BITBLT_CLIENT | BR00_OP_COLOR_BLT | 0x3); + OUT_RING(BR13_SOLID_PATTERN | (0xF0 << 16) | pitch); + OUT_RING((height << 16) | width); + OUT_RING(dev_priv->back_offset + start); + OUT_RING(clear_color); + OUT_RING(0); + ADVANCE_LP_RING(); + } + + if (flags & I810_DEPTH) { + BEGIN_LP_RING(6); + OUT_RING(BR00_BITBLT_CLIENT | BR00_OP_COLOR_BLT | 0x3); + OUT_RING(BR13_SOLID_PATTERN | (0xF0 << 16) | pitch); + OUT_RING((height << 16) | width); + OUT_RING(dev_priv->depth_offset + start); + OUT_RING(clear_zval); + OUT_RING(0); + ADVANCE_LP_RING(); + } + } +} + +static void i810_dma_dispatch_swap(drm_device_t * dev) +{ + drm_i810_private_t *dev_priv = dev->dev_private; + drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv; + int nbox = sarea_priv->nbox; + drm_clip_rect_t *pbox = sarea_priv->boxes; + int pitch = dev_priv->pitch; + int cpp = 2; + int i; + RING_LOCALS; + + DRM_DEBUG("swapbuffers\n"); + + i810_kernel_lost_context(dev); + + if (nbox > I810_NR_SAREA_CLIPRECTS) + nbox = I810_NR_SAREA_CLIPRECTS; + + for (i = 0; i < nbox; i++, pbox++) { + unsigned int w = pbox->x2 - pbox->x1; + unsigned int h = pbox->y2 - pbox->y1; + unsigned int dst = pbox->x1 * cpp + pbox->y1 * pitch; + unsigned int start = dst; + + if (pbox->x1 > pbox->x2 || + pbox->y1 > pbox->y2 || + pbox->x2 > dev_priv->w || pbox->y2 > dev_priv->h) + continue; + + BEGIN_LP_RING(6); + OUT_RING(BR00_BITBLT_CLIENT | BR00_OP_SRC_COPY_BLT | 0x4); + OUT_RING(pitch | (0xCC << 16)); + OUT_RING((h << 16) | (w * cpp)); + if (dev_priv->current_page == 0) + OUT_RING(dev_priv->front_offset + start); + else + OUT_RING(dev_priv->back_offset + start); + OUT_RING(pitch); + if (dev_priv->current_page == 0) + OUT_RING(dev_priv->back_offset + start); + else + OUT_RING(dev_priv->front_offset + start); + ADVANCE_LP_RING(); + } +} + +static void i810_dma_dispatch_vertex(drm_device_t * dev, + drm_buf_t * buf, int discard, int used) +{ + drm_i810_private_t *dev_priv = dev->dev_private; + drm_i810_buf_priv_t *buf_priv = buf->dev_private; + drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv; + drm_clip_rect_t *box = sarea_priv->boxes; + int nbox = sarea_priv->nbox; + unsigned long address = (unsigned long)buf->bus_address; + unsigned long start = address - dev->agp->base; + int i = 0; + RING_LOCALS; + + i810_kernel_lost_context(dev); + + if (nbox > I810_NR_SAREA_CLIPRECTS) + nbox = I810_NR_SAREA_CLIPRECTS; + + if (used > 4 * 1024) + used = 0; + + if (sarea_priv->dirty) + i810EmitState(dev); + + if (buf_priv->currently_mapped == I810_BUF_MAPPED) { + unsigned int prim = (sarea_priv->vertex_prim & PR_MASK); + + *(u32 *) buf_priv->kernel_virtual = + ((GFX_OP_PRIMITIVE | prim | ((used / 4) - 2))); 
+ + if (used & 4) { + *(u32 *) ((u32) buf_priv->kernel_virtual + used) = 0; + used += 4; + } + + i810_unmap_buffer(buf); + } + + if (used) { + do { + if (i < nbox) { + BEGIN_LP_RING(4); + OUT_RING(GFX_OP_SCISSOR | SC_UPDATE_SCISSOR | + SC_ENABLE); + OUT_RING(GFX_OP_SCISSOR_INFO); + OUT_RING(box[i].x1 | (box[i].y1 << 16)); + OUT_RING((box[i].x2 - + 1) | ((box[i].y2 - 1) << 16)); + ADVANCE_LP_RING(); + } + + BEGIN_LP_RING(4); + OUT_RING(CMD_OP_BATCH_BUFFER); + OUT_RING(start | BB1_PROTECTED); + OUT_RING(start + used - 4); + OUT_RING(0); + ADVANCE_LP_RING(); + + } while (++i < nbox); + } + + if (discard) { + dev_priv->counter++; + + (void)cmpxchg(buf_priv->in_use, I810_BUF_CLIENT, + I810_BUF_HARDWARE); + + BEGIN_LP_RING(8); + OUT_RING(CMD_STORE_DWORD_IDX); + OUT_RING(20); + OUT_RING(dev_priv->counter); + OUT_RING(CMD_STORE_DWORD_IDX); + OUT_RING(buf_priv->my_use_idx); + OUT_RING(I810_BUF_FREE); + OUT_RING(CMD_REPORT_HEAD); + OUT_RING(0); + ADVANCE_LP_RING(); + } +} + +static void i810_dma_dispatch_flip(drm_device_t * dev) +{ + drm_i810_private_t *dev_priv = dev->dev_private; + int pitch = dev_priv->pitch; + RING_LOCALS; + + DRM_DEBUG("%s: page=%d pfCurrentPage=%d\n", + __FUNCTION__, + dev_priv->current_page, + dev_priv->sarea_priv->pf_current_page); + + i810_kernel_lost_context(dev); + + BEGIN_LP_RING(2); + OUT_RING(INST_PARSER_CLIENT | INST_OP_FLUSH | INST_FLUSH_MAP_CACHE); + OUT_RING(0); + ADVANCE_LP_RING(); + + BEGIN_LP_RING(I810_DEST_SETUP_SIZE + 2); + /* On i815 at least ASYNC is buggy */ + /* pitch<<5 is from 11.2.8 p158, + its the pitch / 8 then left shifted 8, + so (pitch >> 3) << 8 */ + OUT_RING(CMD_OP_FRONTBUFFER_INFO | (pitch << 5) /*| ASYNC_FLIP */ ); + if (dev_priv->current_page == 0) { + OUT_RING(dev_priv->back_offset); + dev_priv->current_page = 1; + } else { + OUT_RING(dev_priv->front_offset); + dev_priv->current_page = 0; + } + OUT_RING(0); + ADVANCE_LP_RING(); + + BEGIN_LP_RING(2); + OUT_RING(CMD_OP_WAIT_FOR_EVENT | WAIT_FOR_PLANE_A_FLIP); + OUT_RING(0); + ADVANCE_LP_RING(); + + /* Increment the frame counter. The client-side 3D driver must + * throttle the framerate by waiting for this value before + * performing the swapbuffer ioctl. 
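+	 *
+	 * A client-side sketch of that throttle (hypothetical code, not
+	 * part of the imported sources): because the SAREA is shared
+	 * memory, the 3D driver can simply poll the value, e.g.
+	 *
+	 *	while (sarea->pf_current_page != expected_page)
+	 *		sched_yield();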
+ */
+	dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;
+
+}
+
+static void i810_dma_quiescent(drm_device_t * dev)
+{
+	drm_i810_private_t *dev_priv = dev->dev_private;
+	RING_LOCALS;
+
+/*	printk("%s\n", __FUNCTION__); */
+
+	i810_kernel_lost_context(dev);
+
+	BEGIN_LP_RING(4);
+	OUT_RING(INST_PARSER_CLIENT | INST_OP_FLUSH | INST_FLUSH_MAP_CACHE);
+	OUT_RING(CMD_REPORT_HEAD);
+	OUT_RING(0);
+	OUT_RING(0);
+	ADVANCE_LP_RING();
+
+	i810_wait_ring(dev, dev_priv->ring.Size - 8);
+}
+
+static int i810_flush_queue(drm_device_t * dev)
+{
+	drm_i810_private_t *dev_priv = dev->dev_private;
+	drm_device_dma_t *dma = dev->dma;
+	int i, ret = 0;
+	RING_LOCALS;
+
+/*	printk("%s\n", __FUNCTION__); */
+
+	i810_kernel_lost_context(dev);
+
+	BEGIN_LP_RING(2);
+	OUT_RING(CMD_REPORT_HEAD);
+	OUT_RING(0);
+	ADVANCE_LP_RING();
+
+	i810_wait_ring(dev, dev_priv->ring.Size - 8);
+
+	for (i = 0; i < dma->buf_count; i++) {
+		drm_buf_t *buf = dma->buflist[i];
+		drm_i810_buf_priv_t *buf_priv = buf->dev_private;
+
+		int used = cmpxchg(buf_priv->in_use, I810_BUF_HARDWARE,
+				   I810_BUF_FREE);
+
+		if (used == I810_BUF_HARDWARE)
+			DRM_DEBUG("reclaimed from HARDWARE\n");
+		if (used == I810_BUF_CLIENT)
+			DRM_DEBUG("still on client\n");
+	}
+
+	return ret;
+}
+
+/* Must be called with the lock held */
+void i810_reclaim_buffers(drm_device_t *dev, struct file *filp)
+{
+	drm_device_dma_t *dma = dev->dma;
+	int i;
+
+	if (!dma)
+		return;
+	if (!dev->dev_private)
+		return;
+	if (!dma->buflist)
+		return;
+
+	i810_flush_queue(dev);
+
+	for (i = 0; i < dma->buf_count; i++) {
+		drm_buf_t *buf = dma->buflist[i];
+		drm_i810_buf_priv_t *buf_priv = buf->dev_private;
+
+		if (buf->filp == filp && buf_priv) {
+			int used = cmpxchg(buf_priv->in_use, I810_BUF_CLIENT,
+					   I810_BUF_FREE);
+
+			if (used == I810_BUF_CLIENT)
+				DRM_DEBUG("reclaimed from client\n");
+			if (buf_priv->currently_mapped == I810_BUF_MAPPED)
+				buf_priv->currently_mapped = I810_BUF_UNMAPPED;
+		}
+	}
+}
+
+static int i810_flush_ioctl(struct inode *inode, struct file *filp,
+			    unsigned int cmd, unsigned long arg)
+{
+	drm_file_t *priv = filp->private_data;
+	drm_device_t *dev = priv->head->dev;
+
+	LOCK_TEST_WITH_RETURN(dev, filp);
+
+	i810_flush_queue(dev);
+	return 0;
+}
+
+static int i810_dma_vertex(struct inode *inode, struct file *filp,
+			   unsigned int cmd, unsigned long arg)
+{
+	drm_file_t *priv = filp->private_data;
+	drm_device_t *dev = priv->head->dev;
+	drm_device_dma_t *dma = dev->dma;
+	drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;
+	u32 *hw_status = dev_priv->hw_status_page;
+	drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *)
+	    dev_priv->sarea_priv;
+	drm_i810_vertex_t vertex;
+
+	if (copy_from_user
+	    (&vertex, (drm_i810_vertex_t __user *) arg, sizeof(vertex)))
+		return -EFAULT;
+
+	LOCK_TEST_WITH_RETURN(dev, filp);
+
+	DRM_DEBUG("i810 dma vertex, idx %d used %d discard %d\n",
+		  vertex.idx, vertex.used, vertex.discard);
+
+	if (vertex.idx < 0 || vertex.idx >= dma->buf_count)
+		return -EINVAL;
+
+	i810_dma_dispatch_vertex(dev,
+				 dma->buflist[vertex.idx],
+				 vertex.discard, vertex.used);
+
+	atomic_add(vertex.used, &dev->counts[_DRM_STAT_SECONDARY]);
+	atomic_inc(&dev->counts[_DRM_STAT_DMA]);
+	sarea_priv->last_enqueue = dev_priv->counter - 1;
+	sarea_priv->last_dispatch = (int)hw_status[5];
+
+	return 0;
+}
+
+static int i810_clear_bufs(struct inode *inode, struct file *filp,
+			   unsigned int cmd, unsigned long arg)
+{
+	drm_file_t *priv = filp->private_data;
+	drm_device_t *dev = priv->head->dev;
+	drm_i810_clear_t clear;
+
+	if
(copy_from_user + (&clear, (drm_i810_clear_t __user *) arg, sizeof(clear))) + return -EFAULT; + + LOCK_TEST_WITH_RETURN(dev, filp); + + /* GH: Someone's doing nasty things... */ + if (!dev->dev_private) { + return -EINVAL; + } + + i810_dma_dispatch_clear(dev, clear.flags, + clear.clear_color, clear.clear_depth); + return 0; +} + +static int i810_swap_bufs(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->head->dev; + + DRM_DEBUG("i810_swap_bufs\n"); + + LOCK_TEST_WITH_RETURN(dev, filp); + + i810_dma_dispatch_swap(dev); + return 0; +} + +static int i810_getage(struct inode *inode, struct file *filp, unsigned int cmd, + unsigned long arg) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->head->dev; + drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private; + u32 *hw_status = dev_priv->hw_status_page; + drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *) + dev_priv->sarea_priv; + + sarea_priv->last_dispatch = (int)hw_status[5]; + return 0; +} + +static int i810_getbuf(struct inode *inode, struct file *filp, unsigned int cmd, + unsigned long arg) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->head->dev; + int retcode = 0; + drm_i810_dma_t d; + drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private; + u32 *hw_status = dev_priv->hw_status_page; + drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *) + dev_priv->sarea_priv; + + if (copy_from_user(&d, (drm_i810_dma_t __user *) arg, sizeof(d))) + return -EFAULT; + + LOCK_TEST_WITH_RETURN(dev, filp); + + d.granted = 0; + + retcode = i810_dma_get_buffer(dev, &d, filp); + + DRM_DEBUG("i810_dma: %d returning %d, granted = %d\n", + current->pid, retcode, d.granted); + + if (copy_to_user((drm_dma_t __user *) arg, &d, sizeof(d))) + return -EFAULT; + sarea_priv->last_dispatch = (int)hw_status[5]; + + return retcode; +} + +static int i810_copybuf(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg) +{ + /* Never copy - 2.4.x doesn't need it */ + return 0; +} + +static int i810_docopy(struct inode *inode, struct file *filp, unsigned int cmd, + unsigned long arg) +{ + /* Never copy - 2.4.x doesn't need it */ + return 0; +} + +static void i810_dma_dispatch_mc(drm_device_t * dev, drm_buf_t * buf, int used, + unsigned int last_render) +{ + drm_i810_private_t *dev_priv = dev->dev_private; + drm_i810_buf_priv_t *buf_priv = buf->dev_private; + drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv; + unsigned long address = (unsigned long)buf->bus_address; + unsigned long start = address - dev->agp->base; + int u; + RING_LOCALS; + + i810_kernel_lost_context(dev); + + u = cmpxchg(buf_priv->in_use, I810_BUF_CLIENT, I810_BUF_HARDWARE); + if (u != I810_BUF_CLIENT) { + DRM_DEBUG("MC found buffer that isn't mine!\n"); + } + + if (used > 4 * 1024) + used = 0; + + sarea_priv->dirty = 0x7f; + + DRM_DEBUG("dispatch mc addr 0x%lx, used 0x%x\n", address, used); + + dev_priv->counter++; + DRM_DEBUG("dispatch counter : %ld\n", dev_priv->counter); + DRM_DEBUG("i810_dma_dispatch_mc\n"); + DRM_DEBUG("start : %lx\n", start); + DRM_DEBUG("used : %d\n", used); + DRM_DEBUG("start + used - 4 : %ld\n", start + used - 4); + + if (buf_priv->currently_mapped == I810_BUF_MAPPED) { + if (used & 4) { + *(u32 *) ((u32) buf_priv->virtual + used) = 0; + used += 4; + } + + i810_unmap_buffer(buf); + } + BEGIN_LP_RING(4); + OUT_RING(CMD_OP_BATCH_BUFFER); + OUT_RING(start | BB1_PROTECTED); + 
OUT_RING(start + used - 4); + OUT_RING(0); + ADVANCE_LP_RING(); + + BEGIN_LP_RING(8); + OUT_RING(CMD_STORE_DWORD_IDX); + OUT_RING(buf_priv->my_use_idx); + OUT_RING(I810_BUF_FREE); + OUT_RING(0); + + OUT_RING(CMD_STORE_DWORD_IDX); + OUT_RING(16); + OUT_RING(last_render); + OUT_RING(0); + ADVANCE_LP_RING(); +} + +static int i810_dma_mc(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->head->dev; + drm_device_dma_t *dma = dev->dma; + drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private; + u32 *hw_status = dev_priv->hw_status_page; + drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *) + dev_priv->sarea_priv; + drm_i810_mc_t mc; + + if (copy_from_user(&mc, (drm_i810_mc_t __user *) arg, sizeof(mc))) + return -EFAULT; + + LOCK_TEST_WITH_RETURN(dev, filp); + + if (mc.idx >= dma->buf_count || mc.idx < 0) + return -EINVAL; + + i810_dma_dispatch_mc(dev, dma->buflist[mc.idx], mc.used, + mc.last_render); + + atomic_add(mc.used, &dev->counts[_DRM_STAT_SECONDARY]); + atomic_inc(&dev->counts[_DRM_STAT_DMA]); + sarea_priv->last_enqueue = dev_priv->counter - 1; + sarea_priv->last_dispatch = (int)hw_status[5]; + + return 0; +} + +static int i810_rstatus(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->head->dev; + drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private; + + return (int)(((u32 *) (dev_priv->hw_status_page))[4]); +} + +static int i810_ov0_info(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->head->dev; + drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private; + drm_i810_overlay_t data; + + data.offset = dev_priv->overlay_offset; + data.physical = dev_priv->overlay_physical; + if (copy_to_user + ((drm_i810_overlay_t __user *) arg, &data, sizeof(data))) + return -EFAULT; + return 0; +} + +static int i810_fstatus(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->head->dev; + drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private; + + LOCK_TEST_WITH_RETURN(dev, filp); + return I810_READ(0x30008); +} + +static int i810_ov0_flip(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->head->dev; + drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private; + + LOCK_TEST_WITH_RETURN(dev, filp); + //Tell the overlay to update + I810_WRITE(0x30000, dev_priv->overlay_physical | 0x80000000); + + return 0; +} + +/* Not sure why this isn't set all the time: + */ +static void i810_do_init_pageflip(drm_device_t * dev) +{ + drm_i810_private_t *dev_priv = dev->dev_private; + + DRM_DEBUG("%s\n", __FUNCTION__); + dev_priv->page_flipping = 1; + dev_priv->current_page = 0; + dev_priv->sarea_priv->pf_current_page = dev_priv->current_page; +} + +static int i810_do_cleanup_pageflip(drm_device_t * dev) +{ + drm_i810_private_t *dev_priv = dev->dev_private; + + DRM_DEBUG("%s\n", __FUNCTION__); + if (dev_priv->current_page != 0) + i810_dma_dispatch_flip(dev); + + dev_priv->page_flipping = 0; + return 0; +} + +static int i810_flip_bufs(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg) +{ + drm_file_t *priv = filp->private_data; 
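+	/* Example invocation (sketch; the fd and DRM lock setup are
+	 * assumed, not shown in this diff): a client that holds the
+	 * hardware lock requests a flip with
+	 *
+	 *	ioctl(fd, DRM_IOCTL_I810_FLIP, 0);
+	 *
+	 * the first such call also takes the lazy
+	 * i810_do_init_pageflip() path below.
+	 */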
+
+	drm_device_t *dev = priv->head->dev;
+	drm_i810_private_t *dev_priv = dev->dev_private;
+
+	DRM_DEBUG("%s\n", __FUNCTION__);
+
+	LOCK_TEST_WITH_RETURN(dev, filp);
+
+	if (!dev_priv->page_flipping)
+		i810_do_init_pageflip(dev);
+
+	i810_dma_dispatch_flip(dev);
+	return 0;
+}
+
+int i810_driver_load(drm_device_t *dev, unsigned long flags)
+{
+	/* i810 has 4 more counters */
+	dev->counters += 4;
+	dev->types[6] = _DRM_STAT_IRQ;
+	dev->types[7] = _DRM_STAT_PRIMARY;
+	dev->types[8] = _DRM_STAT_SECONDARY;
+	dev->types[9] = _DRM_STAT_DMA;
+
+	return 0;
+}
+
+void i810_driver_lastclose(drm_device_t * dev)
+{
+	i810_dma_cleanup(dev);
+}
+
+void i810_driver_preclose(drm_device_t * dev, DRMFILE filp)
+{
+	if (dev->dev_private) {
+		drm_i810_private_t *dev_priv = dev->dev_private;
+		if (dev_priv->page_flipping) {
+			i810_do_cleanup_pageflip(dev);
+		}
+	}
+}
+
+void i810_driver_reclaim_buffers_locked(drm_device_t * dev, struct file *filp)
+{
+	i810_reclaim_buffers(dev, filp);
+}
+
+int i810_driver_dma_quiescent(drm_device_t * dev)
+{
+	i810_dma_quiescent(dev);
+	return 0;
+}
+
+drm_ioctl_desc_t i810_ioctls[] = {
+	[DRM_IOCTL_NR(DRM_I810_INIT)] = {i810_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+	[DRM_IOCTL_NR(DRM_I810_VERTEX)] = {i810_dma_vertex, DRM_AUTH},
+	[DRM_IOCTL_NR(DRM_I810_CLEAR)] = {i810_clear_bufs, DRM_AUTH},
+	[DRM_IOCTL_NR(DRM_I810_FLUSH)] = {i810_flush_ioctl, DRM_AUTH},
+	[DRM_IOCTL_NR(DRM_I810_GETAGE)] = {i810_getage, DRM_AUTH},
+	[DRM_IOCTL_NR(DRM_I810_GETBUF)] = {i810_getbuf, DRM_AUTH},
+	[DRM_IOCTL_NR(DRM_I810_SWAP)] = {i810_swap_bufs, DRM_AUTH},
+	[DRM_IOCTL_NR(DRM_I810_COPY)] = {i810_copybuf, DRM_AUTH},
+	[DRM_IOCTL_NR(DRM_I810_DOCOPY)] = {i810_docopy, DRM_AUTH},
+	[DRM_IOCTL_NR(DRM_I810_OV0INFO)] = {i810_ov0_info, DRM_AUTH},
+	[DRM_IOCTL_NR(DRM_I810_FSTATUS)] = {i810_fstatus, DRM_AUTH},
+	[DRM_IOCTL_NR(DRM_I810_OV0FLIP)] = {i810_ov0_flip, DRM_AUTH},
+	[DRM_IOCTL_NR(DRM_I810_MC)] = {i810_dma_mc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+	[DRM_IOCTL_NR(DRM_I810_RSTATUS)] = {i810_rstatus, DRM_AUTH},
+	[DRM_IOCTL_NR(DRM_I810_FLIP)] = {i810_flip_bufs, DRM_AUTH}
+};
+
+int i810_max_ioctl = DRM_ARRAY_SIZE(i810_ioctls);
+
+/**
+ * Determine if the device really is AGP or not.
+ *
+ * All Intel graphics chipsets are treated as AGP, even if they are really
+ * PCI-e.
+ *
+ * \param dev The device to be tested.
+ *
+ * \returns
+ * A value of 1 is always returned to indicate every i810 is AGP.
+ */
+int i810_driver_device_is_agp(drm_device_t * dev)
+{
+	return 1;
+}
diff --git a/nx-X11/extras/drm/linux-core/i810_drm.h b/nx-X11/extras/drm/linux-core/i810_drm.h
new file mode 100644
index 000000000..beec4a2a3
--- /dev/null
+++ b/nx-X11/extras/drm/linux-core/i810_drm.h
@@ -0,0 +1,291 @@
+#ifndef _I810_DRM_H_
+#define _I810_DRM_H_
+
+/* WARNING: These defines must be the same as what the Xserver uses.
+ * If you change them, you must change the defines in the Xserver.
+ */
+
+#ifndef _I810_DEFINES_
+#define _I810_DEFINES_
+
+#define I810_DMA_BUF_ORDER	12
+#define I810_DMA_BUF_SZ		(1<<I810_DMA_BUF_ORDER)
+#define I810_DMA_BUF_NR		256
+#define I810_NR_SAREA_CLIPRECTS	8
+
+/* Each region is a minimum of 64k, and there are at most 64 of them.
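+ * With the defines below, I810_LOG_MIN_TEX_REGION_SIZE 16 gives a
+ * minimum region of 1 << 16 == 64k, so the 64 regions together cover
+ * at least 64 * 64k == 4MB of texture space (a worked reading of
+ * these values, for illustration).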
+ */
+#define I810_NR_TEX_REGIONS 64
+#define I810_LOG_MIN_TEX_REGION_SIZE 16
+#endif
+
+#define I810_UPLOAD_TEX0IMAGE  0x1	/* handled clientside */
+#define I810_UPLOAD_TEX1IMAGE  0x2	/* handled clientside */
+#define I810_UPLOAD_CTX        0x4
+#define I810_UPLOAD_BUFFERS    0x8
+#define I810_UPLOAD_TEX0       0x10
+#define I810_UPLOAD_TEX1       0x20
+#define I810_UPLOAD_CLIPRECTS  0x40
+
+/* Indices into buf.Setup where various bits of state are mirrored per
+ * context and per buffer.  These can be fired at the card as a unit,
+ * or in a piecewise fashion as required.
+ */
+
+/* Destbuffer state
+ *    - backbuffer linear offset and pitch -- invariant in the current dri
+ *    - zbuffer linear offset and pitch -- also invariant
+ *    - drawing origin in back and depth buffers.
+ *
+ * Keep the depth/back buffer state here to accommodate private buffers
+ * in the future.
+ */
+#define I810_DESTREG_DI0  0	/* CMD_OP_DESTBUFFER_INFO (2 dwords) */
+#define I810_DESTREG_DI1  1
+#define I810_DESTREG_DV0  2	/* GFX_OP_DESTBUFFER_VARS (2 dwords) */
+#define I810_DESTREG_DV1  3
+#define I810_DESTREG_DR0  4	/* GFX_OP_DRAWRECT_INFO (4 dwords) */
+#define I810_DESTREG_DR1  5
+#define I810_DESTREG_DR2  6
+#define I810_DESTREG_DR3  7
+#define I810_DESTREG_DR4  8
+#define I810_DEST_SETUP_SIZE 10
+
+/* Context state
+ */
+#define I810_CTXREG_CF0   0	/* GFX_OP_COLOR_FACTOR */
+#define I810_CTXREG_CF1   1
+#define I810_CTXREG_ST0   2	/* GFX_OP_STIPPLE */
+#define I810_CTXREG_ST1   3
+#define I810_CTXREG_VF    4	/* GFX_OP_VERTEX_FMT */
+#define I810_CTXREG_MT    5	/* GFX_OP_MAP_TEXELS */
+#define I810_CTXREG_MC0   6	/* GFX_OP_MAP_COLOR_STAGES - stage 0 */
+#define I810_CTXREG_MC1   7	/* GFX_OP_MAP_COLOR_STAGES - stage 1 */
+#define I810_CTXREG_MC2   8	/* GFX_OP_MAP_COLOR_STAGES - stage 2 */
+#define I810_CTXREG_MA0   9	/* GFX_OP_MAP_ALPHA_STAGES - stage 0 */
+#define I810_CTXREG_MA1   10	/* GFX_OP_MAP_ALPHA_STAGES - stage 1 */
+#define I810_CTXREG_MA2   11	/* GFX_OP_MAP_ALPHA_STAGES - stage 2 */
+#define I810_CTXREG_SDM   12	/* GFX_OP_SRC_DEST_MONO */
+#define I810_CTXREG_FOG   13	/* GFX_OP_FOG_COLOR */
+#define I810_CTXREG_B1    14	/* GFX_OP_BOOL_1 */
+#define I810_CTXREG_B2    15	/* GFX_OP_BOOL_2 */
+#define I810_CTXREG_LCS   16	/* GFX_OP_LINEWIDTH_CULL_SHADE_MODE */
+#define I810_CTXREG_PV    17	/* GFX_OP_PV_RULE -- Invariant! */
+#define I810_CTXREG_ZA    18	/* GFX_OP_ZBIAS_ALPHAFUNC */
+#define I810_CTXREG_AA    19	/* GFX_OP_ANTIALIAS */
+#define I810_CTX_SETUP_SIZE 20
+
+/* Texture state (per tex unit)
+ */
+#define I810_TEXREG_MI0  0	/* GFX_OP_MAP_INFO (4 dwords) */
+#define I810_TEXREG_MI1  1
+#define I810_TEXREG_MI2  2
+#define I810_TEXREG_MI3  3
+#define I810_TEXREG_MF   4	/* GFX_OP_MAP_FILTER */
+#define I810_TEXREG_MLC  5	/* GFX_OP_MAP_LOD_CTL */
+#define I810_TEXREG_MLL  6	/* GFX_OP_MAP_LOD_LIMITS */
+#define I810_TEXREG_MCS  7	/* GFX_OP_MAP_COORD_SETS ???
 */
+#define I810_TEX_SETUP_SIZE 8
+
+/* Flags for clear ioctl
+ */
+#define I810_FRONT   0x1
+#define I810_BACK    0x2
+#define I810_DEPTH   0x4
+
+typedef enum _drm_i810_init_func {
+	I810_INIT_DMA = 0x01,
+	I810_CLEANUP_DMA = 0x02,
+	I810_INIT_DMA_1_4 = 0x03
+} drm_i810_init_func_t;
+
+/* This is the init structure after v1.2 */
+typedef struct _drm_i810_init {
+	drm_i810_init_func_t func;
+#if CONFIG_XFREE86_VERSION < XFREE86_VERSION(4,1,0,0)
+	int ring_map_idx;
+	int buffer_map_idx;
+#else
+	unsigned int mmio_offset;
+	unsigned int buffers_offset;
+#endif
+	int sarea_priv_offset;
+	unsigned int ring_start;
+	unsigned int ring_end;
+	unsigned int ring_size;
+	unsigned int front_offset;
+	unsigned int back_offset;
+	unsigned int depth_offset;
+	unsigned int overlay_offset;
+	unsigned int overlay_physical;
+	unsigned int w;
+	unsigned int h;
+	unsigned int pitch;
+	unsigned int pitch_bits;
+} drm_i810_init_t;
+
+/* This is the init structure prior to v1.2 */
+typedef struct _drm_i810_pre12_init {
+	drm_i810_init_func_t func;
+#if CONFIG_XFREE86_VERSION < XFREE86_VERSION(4,1,0,0)
+	int ring_map_idx;
+	int buffer_map_idx;
+#else
+	unsigned int mmio_offset;
+	unsigned int buffers_offset;
+#endif
+	int sarea_priv_offset;
+	unsigned int ring_start;
+	unsigned int ring_end;
+	unsigned int ring_size;
+	unsigned int front_offset;
+	unsigned int back_offset;
+	unsigned int depth_offset;
+	unsigned int w;
+	unsigned int h;
+	unsigned int pitch;
+	unsigned int pitch_bits;
+} drm_i810_pre12_init_t;
+
+/* Warning: If you change the SAREA structure you must change the Xserver
+ * structure as well */
+
+typedef struct _drm_i810_tex_region {
+	unsigned char next, prev;	/* indices to form a circular LRU  */
+	unsigned char in_use;	/* owned by a client, or free? */
+	int age;		/* tracked by clients to update local LRUs */
+} drm_i810_tex_region_t;
+
+typedef struct _drm_i810_sarea {
+	unsigned int ContextState[I810_CTX_SETUP_SIZE];
+	unsigned int BufferState[I810_DEST_SETUP_SIZE];
+	unsigned int TexState[2][I810_TEX_SETUP_SIZE];
+	unsigned int dirty;
+
+	unsigned int nbox;
+	drm_clip_rect_t boxes[I810_NR_SAREA_CLIPRECTS];
+
+	/* Maintain an LRU of contiguous regions of texture space.  If
+	 * you think you own a region of texture memory, and it has an
+	 * age different to the one you set, then you are mistaken and
+	 * it has been stolen by another client.  If global texAge
+	 * hasn't changed, there is no need to walk the list.
+	 *
+	 * These regions can be used as a proxy for the fine-grained
+	 * texture information of other clients - by maintaining them
+	 * in the same lru which is used to age their own textures,
+	 * clients have an approximate lru for the whole of global
+	 * texture space, and can make informed decisions as to which
+	 * areas to kick out.  There is no need to choose whether to
+	 * kick out your own texture or someone else's - simply eject
+	 * them all in LRU order.
+	 */
+
+	drm_i810_tex_region_t texList[I810_NR_TEX_REGIONS + 1];
+	/* Last elt is sentinel */
+	int texAge;		/* last time texture was uploaded */
+	int last_enqueue;	/* last time a buffer was enqueued */
+	int last_dispatch;	/* age of the most recently dispatched buffer */
+	int last_quiescent;	/*  */
+	int ctxOwner;		/* last context to upload state */
+
+	int vertex_prim;
+
+	int pf_enabled;		/* is pageflipping allowed? */
+	int pf_active;
+	int pf_current_page;	/* which buffer is being displayed?
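+				 * (Illustrative reader code, hypothetical:
+				 * the scanned-out offset is
+				 * pf_current_page ? back_offset : front_offset,
+				 * matching i810_dma_dispatch_flip() above.)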
*/ +} drm_i810_sarea_t; + +/* WARNING: If you change any of these defines, make sure to change the + * defines in the Xserver file (xf86drmMga.h) + */ + +/* i810 specific ioctls + * The device specific ioctl range is 0x40 to 0x79. + */ +#define DRM_I810_INIT 0x00 +#define DRM_I810_VERTEX 0x01 +#define DRM_I810_CLEAR 0x02 +#define DRM_I810_FLUSH 0x03 +#define DRM_I810_GETAGE 0x04 +#define DRM_I810_GETBUF 0x05 +#define DRM_I810_SWAP 0x06 +#define DRM_I810_COPY 0x07 +#define DRM_I810_DOCOPY 0x08 +#define DRM_I810_OV0INFO 0x09 +#define DRM_I810_FSTATUS 0x0a +#define DRM_I810_OV0FLIP 0x0b +#define DRM_I810_MC 0x0c +#define DRM_I810_RSTATUS 0x0d +#define DRM_I810_FLIP 0x0e + +#define DRM_IOCTL_I810_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I810_INIT, drm_i810_init_t) +#define DRM_IOCTL_I810_VERTEX DRM_IOW( DRM_COMMAND_BASE + DRM_I810_VERTEX, drm_i810_vertex_t) +#define DRM_IOCTL_I810_CLEAR DRM_IOW( DRM_COMMAND_BASE + DRM_I810_CLEAR, drm_i810_clear_t) +#define DRM_IOCTL_I810_FLUSH DRM_IO( DRM_COMMAND_BASE + DRM_I810_FLUSH) +#define DRM_IOCTL_I810_GETAGE DRM_IO( DRM_COMMAND_BASE + DRM_I810_GETAGE) +#define DRM_IOCTL_I810_GETBUF DRM_IOWR(DRM_COMMAND_BASE + DRM_I810_GETBUF, drm_i810_dma_t) +#define DRM_IOCTL_I810_SWAP DRM_IO( DRM_COMMAND_BASE + DRM_I810_SWAP) +#define DRM_IOCTL_I810_COPY DRM_IOW( DRM_COMMAND_BASE + DRM_I810_COPY, drm_i810_copy_t) +#define DRM_IOCTL_I810_DOCOPY DRM_IO( DRM_COMMAND_BASE + DRM_I810_DOCOPY) +#define DRM_IOCTL_I810_OV0INFO DRM_IOR( DRM_COMMAND_BASE + DRM_I810_OV0INFO, drm_i810_overlay_t) +#define DRM_IOCTL_I810_FSTATUS DRM_IO ( DRM_COMMAND_BASE + DRM_I810_FSTATUS) +#define DRM_IOCTL_I810_OV0FLIP DRM_IO ( DRM_COMMAND_BASE + DRM_I810_OV0FLIP) +#define DRM_IOCTL_I810_MC DRM_IOW( DRM_COMMAND_BASE + DRM_I810_MC, drm_i810_mc_t) +#define DRM_IOCTL_I810_RSTATUS DRM_IO ( DRM_COMMAND_BASE + DRM_I810_RSTATUS) +#define DRM_IOCTL_I810_FLIP DRM_IO ( DRM_COMMAND_BASE + DRM_I810_FLIP) + +typedef struct _drm_i810_clear { + int clear_color; + int clear_depth; + int flags; +} drm_i810_clear_t; + +/* These may be placeholders if we have more cliprects than + * I810_NR_SAREA_CLIPRECTS. In that case, the client sets discard to + * false, indicating that the buffer will be dispatched again with a + * new set of cliprects. + */ +typedef struct _drm_i810_vertex { + int idx; /* buffer index */ + int used; /* nr bytes in use */ + int discard; /* client is finished with the buffer? 
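+				 * (Sketch of the multi-pass case, for
+				 * illustration: with more cliprects than
+				 * I810_NR_SAREA_CLIPRECTS, a client submits
+				 * the same idx repeatedly with discard == 0
+				 * and sets discard == 1 only on the final
+				 * pass, letting the kernel free the buffer.)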
*/ +} drm_i810_vertex_t; + +typedef struct _drm_i810_copy_t { + int idx; /* buffer index */ + int used; /* nr bytes in use */ + void *address; /* Address to copy from */ +} drm_i810_copy_t; + +#define PR_TRIANGLES (0x0<<18) +#define PR_TRISTRIP_0 (0x1<<18) +#define PR_TRISTRIP_1 (0x2<<18) +#define PR_TRIFAN (0x3<<18) +#define PR_POLYGON (0x4<<18) +#define PR_LINES (0x5<<18) +#define PR_LINESTRIP (0x6<<18) +#define PR_RECTS (0x7<<18) +#define PR_MASK (0x7<<18) + +typedef struct drm_i810_dma { + void *virtual; + int request_idx; + int request_size; + int granted; +} drm_i810_dma_t; + +typedef struct _drm_i810_overlay_t { + unsigned int offset; /* Address of the Overlay Regs */ + unsigned int physical; +} drm_i810_overlay_t; + +typedef struct _drm_i810_mc { + int idx; /* buffer index */ + int used; /* nr bytes in use */ + int num_blocks; /* number of GFXBlocks */ + int *length; /* List of lengths for GFXBlocks (FUTURE) */ + unsigned int last_render; /* Last Render Request */ +} drm_i810_mc_t; + +#endif /* _I810_DRM_H_ */ diff --git a/nx-X11/extras/drm/linux-core/i810_drv.c b/nx-X11/extras/drm/linux-core/i810_drv.c new file mode 100644 index 000000000..fca04cc6d --- /dev/null +++ b/nx-X11/extras/drm/linux-core/i810_drv.c @@ -0,0 +1,107 @@ +/* i810_drv.c -- I810 driver -*- linux-c -*- + * Created: Mon Dec 13 01:56:22 1999 by jhartmann@precisioninsight.com + * + * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. + * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: + * Rickard E. 
(Rik) Faith <faith@valinux.com>
+ *	    Jeff Hartmann <jhartmann@valinux.com>
+ *	    Gareth Hughes <gareth@valinux.com>
+ */
+
+#include <linux/config.h>
+#include "drmP.h"
+#include "drm.h"
+#include "i810_drm.h"
+#include "i810_drv.h"
+
+#include "drm_pciids.h"
+
+static int dri_library_name(struct drm_device * dev, char * buf)
+{
+	return snprintf(buf, PAGE_SIZE, "i830\n");
+}
+
+static struct pci_device_id pciidlist[] = {
+	i810_PCI_IDS
+};
+
+extern drm_ioctl_desc_t i810_ioctls[];
+extern int i810_max_ioctl;
+
+static int probe(struct pci_dev *pdev, const struct pci_device_id *ent);
+static struct drm_driver driver = {
+	.driver_features =
+	    DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | DRIVER_USE_MTRR |
+	    DRIVER_HAVE_DMA | DRIVER_DMA_QUEUE,
+	.dev_priv_size = sizeof(drm_i810_buf_priv_t),
+	.load = i810_driver_load,
+	.lastclose = i810_driver_lastclose,
+	.preclose = i810_driver_preclose,
+	.device_is_agp = i810_driver_device_is_agp,
+	.reclaim_buffers_locked = i810_driver_reclaim_buffers_locked,
+	.dma_quiescent = i810_driver_dma_quiescent,
+	.get_map_ofs = drm_core_get_map_ofs,
+	.get_reg_ofs = drm_core_get_reg_ofs,
+	.dri_library_name = dri_library_name,
+	.ioctls = i810_ioctls,
+	.fops = {
+		 .owner = THIS_MODULE,
+		 .open = drm_open,
+		 .release = drm_release,
+		 .ioctl = drm_ioctl,
+		 .mmap = drm_mmap,
+		 .poll = drm_poll,
+		 .fasync = drm_fasync,
+		 },
+	.pci_driver = {
+		       .name = DRIVER_NAME,
+		       .id_table = pciidlist,
+		       .probe = probe,
+		       .remove = __devexit_p(drm_cleanup_pci),
+		       }
+};
+
+static int probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	return drm_get_dev(pdev, ent, &driver);
+}
+
+static int __init i810_init(void)
+{
+	driver.num_ioctls = i810_max_ioctl;
+	return drm_init(&driver, pciidlist);
+}
+
+static void __exit i810_exit(void)
+{
+	drm_exit(&driver);
+}
+
+module_init(i810_init);
+module_exit(i810_exit);
+
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL and additional rights");
diff --git a/nx-X11/extras/drm/linux-core/i810_drv.h b/nx-X11/extras/drm/linux-core/i810_drv.h
new file mode 100644
index 000000000..65bc77ddf
--- /dev/null
+++ b/nx-X11/extras/drm/linux-core/i810_drv.h
@@ -0,0 +1,238 @@
+/* i810_drv.h -- Private header for the i810 driver -*- linux-c -*-
+ * Created: Mon Dec 13 01:50:01 1999 by jhartmann@precisioninsight.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL + * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * + * Authors: Rickard E. (Rik) Faith <faith@valinux.com> + * Jeff Hartmann <jhartmann@valinux.com> + * + */ + +#ifndef _I810_DRV_H_ +#define _I810_DRV_H_ + +/* General customization: + */ + +#define DRIVER_AUTHOR "VA Linux Systems Inc." + +#define DRIVER_NAME "i810" +#define DRIVER_DESC "Intel i810" +#define DRIVER_DATE "20030605" + +/* Interface history + * + * 1.1 - XFree86 4.1 + * 1.2 - XvMC interfaces + * - XFree86 4.2 + * 1.2.1 - Disable copying code (leave stub ioctls for backwards compatibility) + * - Remove requirement for interrupt (leave stubs again) + * 1.3 - Add page flipping. + * 1.4 - fix DRM interface + */ +#define DRIVER_MAJOR 1 +#define DRIVER_MINOR 4 +#define DRIVER_PATCHLEVEL 0 + +typedef struct drm_i810_buf_priv { + u32 *in_use; + int my_use_idx; + int currently_mapped; + void *virtual; + void *kernel_virtual; +} drm_i810_buf_priv_t; + +typedef struct _drm_i810_ring_buffer { + int tail_mask; + unsigned long Start; + unsigned long End; + unsigned long Size; + u8 *virtual_start; + int head; + int tail; + int space; +} drm_i810_ring_buffer_t; + +typedef struct drm_i810_private { + drm_map_t *sarea_map; + drm_map_t *mmio_map; + + drm_i810_sarea_t *sarea_priv; + drm_i810_ring_buffer_t ring; + + void *hw_status_page; + unsigned long counter; + + dma_addr_t dma_status_page; + + drm_buf_t *mmap_buffer; + + u32 front_di1, back_di1, zi1; + + int back_offset; + int depth_offset; + int overlay_offset; + int overlay_physical; + int w, h; + int pitch; + int back_pitch; + int depth_pitch; + + int do_boxes; + int dma_used; + + int current_page; + int page_flipping; + + wait_queue_head_t irq_queue; + atomic_t irq_received; + atomic_t irq_emitted; + + int front_offset; +} drm_i810_private_t; + + /* i810_dma.c */ +extern void i810_reclaim_buffers(drm_device_t *dev, struct file *filp); + +extern int i810_driver_dma_quiescent(drm_device_t * dev); +extern void i810_driver_reclaim_buffers_locked(drm_device_t * dev, + struct file *filp); +extern int i810_driver_load(struct drm_device *, unsigned long flags); +extern void i810_driver_lastclose(drm_device_t * dev); +extern void i810_driver_preclose(drm_device_t * dev, DRMFILE filp); +extern void i810_driver_reclaim_buffers_locked(drm_device_t * dev, + struct file *filp); +extern int i810_driver_device_is_agp(drm_device_t * dev); + +#define I810_BASE(reg) ((unsigned long) \ + dev_priv->mmio_map->handle) +#define I810_ADDR(reg) (I810_BASE(reg) + reg) +#define I810_DEREF(reg) *(__volatile__ int *)I810_ADDR(reg) +#define I810_READ(reg) I810_DEREF(reg) +#define I810_WRITE(reg,val) do { I810_DEREF(reg) = val; } while (0) +#define I810_DEREF16(reg) *(__volatile__ u16 *)I810_ADDR(reg) +#define I810_READ16(reg) I810_DEREF16(reg) +#define I810_WRITE16(reg,val) do { I810_DEREF16(reg) = val; } while (0) + +#define I810_VERBOSE 0 +#define RING_LOCALS unsigned int outring, ringmask; \ + volatile char *virt; + +#define BEGIN_LP_RING(n) do { \ + if (I810_VERBOSE) \ + DRM_DEBUG("BEGIN_LP_RING(%d) in %s\n", n, __FUNCTION__); \ + if (dev_priv->ring.space < n*4) \ + i810_wait_ring(dev, n*4); \ + dev_priv->ring.space -= n*4; \ + outring = dev_priv->ring.tail; \ + ringmask = dev_priv->ring.tail_mask; \ + virt = dev_priv->ring.virtual_start; \ +} while (0) + +#define ADVANCE_LP_RING() do { 
\ + if (I810_VERBOSE) DRM_DEBUG("ADVANCE_LP_RING\n"); \ + dev_priv->ring.tail = outring; \ + I810_WRITE(LP_RING + RING_TAIL, outring); \ +} while(0) + +#define OUT_RING(n) do { \ + if (I810_VERBOSE) DRM_DEBUG(" OUT_RING %x\n", (int)(n)); \ + *(volatile unsigned int *)(virt + outring) = n; \ + outring += 4; \ + outring &= ringmask; \ +} while (0) + +#define GFX_OP_USER_INTERRUPT ((0<<29)|(2<<23)) +#define GFX_OP_BREAKPOINT_INTERRUPT ((0<<29)|(1<<23)) +#define CMD_REPORT_HEAD (7<<23) +#define CMD_STORE_DWORD_IDX ((0x21<<23) | 0x1) +#define CMD_OP_BATCH_BUFFER ((0x0<<29)|(0x30<<23)|0x1) + +#define INST_PARSER_CLIENT 0x00000000 +#define INST_OP_FLUSH 0x02000000 +#define INST_FLUSH_MAP_CACHE 0x00000001 + +#define BB1_START_ADDR_MASK (~0x7) +#define BB1_PROTECTED (1<<0) +#define BB1_UNPROTECTED (0<<0) +#define BB2_END_ADDR_MASK (~0x7) + +#define I810REG_HWSTAM 0x02098 +#define I810REG_INT_IDENTITY_R 0x020a4 +#define I810REG_INT_MASK_R 0x020a8 +#define I810REG_INT_ENABLE_R 0x020a0 + +#define LP_RING 0x2030 +#define HP_RING 0x2040 +#define RING_TAIL 0x00 +#define TAIL_ADDR 0x000FFFF8 +#define RING_HEAD 0x04 +#define HEAD_WRAP_COUNT 0xFFE00000 +#define HEAD_WRAP_ONE 0x00200000 +#define HEAD_ADDR 0x001FFFFC +#define RING_START 0x08 +#define START_ADDR 0x00FFFFF8 +#define RING_LEN 0x0C +#define RING_NR_PAGES 0x000FF000 +#define RING_REPORT_MASK 0x00000006 +#define RING_REPORT_64K 0x00000002 +#define RING_REPORT_128K 0x00000004 +#define RING_NO_REPORT 0x00000000 +#define RING_VALID_MASK 0x00000001 +#define RING_VALID 0x00000001 +#define RING_INVALID 0x00000000 + +#define GFX_OP_SCISSOR ((0x3<<29)|(0x1c<<24)|(0x10<<19)) +#define SC_UPDATE_SCISSOR (0x1<<1) +#define SC_ENABLE_MASK (0x1<<0) +#define SC_ENABLE (0x1<<0) + +#define GFX_OP_SCISSOR_INFO ((0x3<<29)|(0x1d<<24)|(0x81<<16)|(0x1)) +#define SCI_YMIN_MASK (0xffff<<16) +#define SCI_XMIN_MASK (0xffff<<0) +#define SCI_YMAX_MASK (0xffff<<16) +#define SCI_XMAX_MASK (0xffff<<0) + +#define GFX_OP_COLOR_FACTOR ((0x3<<29)|(0x1d<<24)|(0x1<<16)|0x0) +#define GFX_OP_STIPPLE ((0x3<<29)|(0x1d<<24)|(0x83<<16)) +#define GFX_OP_MAP_INFO ((0x3<<29)|(0x1d<<24)|0x2) +#define GFX_OP_DESTBUFFER_VARS ((0x3<<29)|(0x1d<<24)|(0x85<<16)|0x0) +#define GFX_OP_DRAWRECT_INFO ((0x3<<29)|(0x1d<<24)|(0x80<<16)|(0x3)) +#define GFX_OP_PRIMITIVE ((0x3<<29)|(0x1f<<24)) + +#define CMD_OP_Z_BUFFER_INFO ((0x0<<29)|(0x16<<23)) +#define CMD_OP_DESTBUFFER_INFO ((0x0<<29)|(0x15<<23)) +#define CMD_OP_FRONTBUFFER_INFO ((0x0<<29)|(0x14<<23)) +#define CMD_OP_WAIT_FOR_EVENT ((0x0<<29)|(0x03<<23)) + +#define BR00_BITBLT_CLIENT 0x40000000 +#define BR00_OP_COLOR_BLT 0x10000000 +#define BR00_OP_SRC_COPY_BLT 0x10C00000 +#define BR13_SOLID_PATTERN 0x80000000 + +#define WAIT_FOR_PLANE_A_SCANLINES (1<<1) +#define WAIT_FOR_PLANE_A_FLIP (1<<2) +#define WAIT_FOR_VBLANK (1<<3) + +#endif diff --git a/nx-X11/extras/drm/linux-core/i830_dma.c b/nx-X11/extras/drm/linux-core/i830_dma.c new file mode 100644 index 000000000..9d80d917d --- /dev/null +++ b/nx-X11/extras/drm/linux-core/i830_dma.c @@ -0,0 +1,1599 @@ +/* i830_dma.c -- DMA support for the I830 -*- linux-c -*- + * Created: Mon Dec 13 01:50:01 1999 by jhartmann@precisioninsight.com + * + * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. + * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. + * All Rights Reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * + * Authors: Rickard E. (Rik) Faith <faith@valinux.com> + * Jeff Hartmann <jhartmann@valinux.com> + * Keith Whitwell <keith@tungstengraphics.com> + * Abraham vd Merwe <abraham@2d3d.co.za> + * + */ + +#include <linux/interrupt.h> /* For task queue support */ +#include <linux/pagemap.h> /* For FASTCALL on unlock_page() */ +#include <linux/delay.h> +#include <asm/uaccess.h> + +#include "drmP.h" +#include "drm.h" +#include "i830_drm.h" +#include "i830_drv.h" + +#ifdef DO_MUNMAP_4_ARGS +#define DO_MUNMAP(m, a, l) do_munmap(m, a, l, 1) +#else +#define DO_MUNMAP(m, a, l) do_munmap(m, a, l) +#endif + +#define I830_BUF_FREE 2 +#define I830_BUF_CLIENT 1 +#define I830_BUF_HARDWARE 0 + +#define I830_BUF_UNMAPPED 0 +#define I830_BUF_MAPPED 1 + +static drm_buf_t *i830_freelist_get(drm_device_t * dev) +{ + drm_device_dma_t *dma = dev->dma; + int i; + int used; + + /* Linear search might not be the best solution */ + + for (i = 0; i < dma->buf_count; i++) { + drm_buf_t *buf = dma->buflist[i]; + drm_i830_buf_priv_t *buf_priv = buf->dev_private; + /* In use is already a pointer */ + used = cmpxchg(buf_priv->in_use, I830_BUF_FREE, + I830_BUF_CLIENT); + if (used == I830_BUF_FREE) { + return buf; + } + } + return NULL; +} + +/* This should only be called if the buffer is not sent to the hardware + * yet, the hardware updates in use for us once its on the ring buffer. 
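+ *
+ * Buffer ownership sketch (a summary for illustration, mirroring the
+ * i810 path earlier in this diff):
+ *
+ *	I830_BUF_FREE   --cmpxchg--> I830_BUF_CLIENT    (freelist_get)
+ *	I830_BUF_CLIENT --cmpxchg--> I830_BUF_FREE      (this function)
+ *	I830_BUF_CLIENT --cmpxchg--> I830_BUF_HARDWARE  (dispatch), after
+ *	which the ring writes the buffer back to FREE when the batch
+ *	retires.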
+ */ + +static int i830_freelist_put(drm_device_t * dev, drm_buf_t * buf) +{ + drm_i830_buf_priv_t *buf_priv = buf->dev_private; + int used; + + /* In use is already a pointer */ + used = cmpxchg(buf_priv->in_use, I830_BUF_CLIENT, I830_BUF_FREE); + if (used != I830_BUF_CLIENT) { + DRM_ERROR("Freeing buffer thats not in use : %d\n", buf->idx); + return -EINVAL; + } + + return 0; +} + + +static int i830_mmap_buffers(struct file *filp, struct vm_area_struct *vma) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev; + drm_i830_private_t *dev_priv; + drm_buf_t *buf; + drm_i830_buf_priv_t *buf_priv; + + lock_kernel(); + dev = priv->head->dev; + dev_priv = dev->dev_private; + buf = dev_priv->mmap_buffer; + buf_priv = buf->dev_private; + + vma->vm_flags |= (VM_IO | VM_DONTCOPY); + vma->vm_file = filp; + + buf_priv->currently_mapped = I830_BUF_MAPPED; + unlock_kernel(); + + if (remap_pfn_range(vma, vma->vm_start, + VM_OFFSET(vma) >> PAGE_SHIFT, + vma->vm_end - vma->vm_start, + vma->vm_page_prot)) + return -EAGAIN; + return 0; +} + +static struct file_operations i830_buffer_fops = { + .open = drm_open, + .release = drm_release, + .ioctl = drm_ioctl, + .mmap = i830_mmap_buffers, + .fasync = drm_fasync, +}; + +static int i830_map_buffer(drm_buf_t * buf, struct file *filp) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->head->dev; + drm_i830_buf_priv_t *buf_priv = buf->dev_private; + drm_i830_private_t *dev_priv = dev->dev_private; + struct file_operations *old_fops; + unsigned long virtual; + int retcode = 0; + + if (buf_priv->currently_mapped == I830_BUF_MAPPED) + return -EINVAL; + + down_write(¤t->mm->mmap_sem); + old_fops = filp->f_op; + filp->f_op = &i830_buffer_fops; + dev_priv->mmap_buffer = buf; + virtual = do_mmap(filp, 0, buf->total, PROT_READ | PROT_WRITE, + MAP_SHARED, buf->bus_address); + dev_priv->mmap_buffer = NULL; + filp->f_op = old_fops; + if (IS_ERR((void *)virtual)) { /* ugh */ + /* Real error */ + DRM_ERROR("mmap error\n"); + retcode = virtual; + buf_priv->virtual = NULL; + } else { + buf_priv->virtual = (void __user *)virtual; + } + up_write(¤t->mm->mmap_sem); + + return retcode; +} + +static int i830_unmap_buffer(drm_buf_t * buf) +{ + drm_i830_buf_priv_t *buf_priv = buf->dev_private; + int retcode = 0; + + if (buf_priv->currently_mapped != I830_BUF_MAPPED) + return -EINVAL; + + down_write(¤t->mm->mmap_sem); + retcode = DO_MUNMAP(current->mm, + (unsigned long)buf_priv->virtual, + (size_t) buf->total); + up_write(¤t->mm->mmap_sem); + + buf_priv->currently_mapped = I830_BUF_UNMAPPED; + buf_priv->virtual = NULL; + + return retcode; +} + +static int i830_dma_get_buffer(drm_device_t * dev, drm_i830_dma_t * d, + struct file *filp) +{ + drm_buf_t *buf; + drm_i830_buf_priv_t *buf_priv; + int retcode = 0; + + buf = i830_freelist_get(dev); + if (!buf) { + retcode = -ENOMEM; + DRM_DEBUG("retcode=%d\n", retcode); + return retcode; + } + + retcode = i830_map_buffer(buf, filp); + if (retcode) { + i830_freelist_put(dev, buf); + DRM_ERROR("mapbuf failed, retcode %d\n", retcode); + return retcode; + } + buf->filp = filp; + buf_priv = buf->dev_private; + d->granted = 1; + d->request_idx = buf->idx; + d->request_size = buf->total; + d->virtual = buf_priv->virtual; + + return retcode; +} + +static int i830_dma_cleanup(drm_device_t * dev) +{ + drm_device_dma_t *dma = dev->dma; + + /* Make sure interrupts are disabled here because the uninstall ioctl + * may not have been called from userspace and after dev_private + * is freed, it's too late. 
+ */ + if (dev->irq_enabled) + drm_irq_uninstall(dev); + + if (dev->dev_private) { + int i; + drm_i830_private_t *dev_priv = + (drm_i830_private_t *) dev->dev_private; + + if (dev_priv->ring.virtual_start) { + drm_ioremapfree((void *)dev_priv->ring.virtual_start, + dev_priv->ring.Size, dev); + } + if (dev_priv->hw_status_page) { + pci_free_consistent(dev->pdev, PAGE_SIZE, + dev_priv->hw_status_page, + dev_priv->dma_status_page); + /* Need to rewrite hardware status page */ + I830_WRITE(0x02080, 0x1ffff000); + } + + drm_free(dev->dev_private, sizeof(drm_i830_private_t), + DRM_MEM_DRIVER); + dev->dev_private = NULL; + + for (i = 0; i < dma->buf_count; i++) { + drm_buf_t *buf = dma->buflist[i]; + drm_i830_buf_priv_t *buf_priv = buf->dev_private; + if (buf_priv->kernel_virtual && buf->total) + drm_ioremapfree(buf_priv->kernel_virtual, + buf->total, dev); + } + } + return 0; +} + +int i830_wait_ring(drm_device_t * dev, int n, const char *caller) +{ + drm_i830_private_t *dev_priv = dev->dev_private; + drm_i830_ring_buffer_t *ring = &(dev_priv->ring); + int iters = 0; + unsigned long end; + unsigned int last_head = I830_READ(LP_RING + RING_HEAD) & HEAD_ADDR; + + end = jiffies + (HZ * 3); + while (ring->space < n) { + ring->head = I830_READ(LP_RING + RING_HEAD) & HEAD_ADDR; + ring->space = ring->head - (ring->tail + 8); + if (ring->space < 0) + ring->space += ring->Size; + + if (ring->head != last_head) { + end = jiffies + (HZ * 3); + last_head = ring->head; + } + + iters++; + if (time_before(end, jiffies)) { + DRM_ERROR("space: %d wanted %d\n", ring->space, n); + DRM_ERROR("lockup\n"); + goto out_wait_ring; + } + udelay(1); + dev_priv->sarea_priv->perf_boxes |= I830_BOX_WAIT; + } + + out_wait_ring: + return iters; +} + +static void i830_kernel_lost_context(drm_device_t * dev) +{ + drm_i830_private_t *dev_priv = dev->dev_private; + drm_i830_ring_buffer_t *ring = &(dev_priv->ring); + + ring->head = I830_READ(LP_RING + RING_HEAD) & HEAD_ADDR; + ring->tail = I830_READ(LP_RING + RING_TAIL) & TAIL_ADDR; + ring->space = ring->head - (ring->tail + 8); + if (ring->space < 0) + ring->space += ring->Size; + + if (ring->head == ring->tail) + dev_priv->sarea_priv->perf_boxes |= I830_BOX_RING_EMPTY; +} + +static int i830_freelist_init(drm_device_t * dev, drm_i830_private_t * dev_priv) +{ + drm_device_dma_t *dma = dev->dma; + int my_idx = 36; + u32 *hw_status = (u32 *) (dev_priv->hw_status_page + my_idx); + int i; + + if (dma->buf_count > 1019) { + /* Not enough space in the status page for the freelist */ + return -EINVAL; + } + + for (i = 0; i < dma->buf_count; i++) { + drm_buf_t *buf = dma->buflist[i]; + drm_i830_buf_priv_t *buf_priv = buf->dev_private; + + buf_priv->in_use = hw_status++; + buf_priv->my_use_idx = my_idx; + my_idx += 4; + + *buf_priv->in_use = I830_BUF_FREE; + + buf_priv->kernel_virtual = drm_ioremap(buf->bus_address, + buf->total, dev); + } + return 0; +} + +static int i830_dma_initialize(drm_device_t * dev, + drm_i830_private_t * dev_priv, + drm_i830_init_t * init) +{ + struct list_head *list; + + memset(dev_priv, 0, sizeof(drm_i830_private_t)); + + list_for_each(list, &dev->maplist->head) { + drm_map_list_t *r_list = list_entry(list, drm_map_list_t, head); + if (r_list->map && + r_list->map->type == _DRM_SHM && + r_list->map->flags & _DRM_CONTAINS_LOCK) { + dev_priv->sarea_map = r_list->map; + break; + } + } + + if (!dev_priv->sarea_map) { + dev->dev_private = (void *)dev_priv; + i830_dma_cleanup(dev); + DRM_ERROR("can not find sarea!\n"); + return -EINVAL; + } + dev_priv->mmio_map = 
drm_core_findmap(dev, init->mmio_offset); + if (!dev_priv->mmio_map) { + dev->dev_private = (void *)dev_priv; + i830_dma_cleanup(dev); + DRM_ERROR("can not find mmio map!\n"); + return -EINVAL; + } + dev->agp_buffer_token = init->buffers_offset; + dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset); + if (!dev->agp_buffer_map) { + dev->dev_private = (void *)dev_priv; + i830_dma_cleanup(dev); + DRM_ERROR("can not find dma buffer map!\n"); + return -EINVAL; + } + + dev_priv->sarea_priv = (drm_i830_sarea_t *) + ((u8 *) dev_priv->sarea_map->handle + init->sarea_priv_offset); + + dev_priv->ring.Start = init->ring_start; + dev_priv->ring.End = init->ring_end; + dev_priv->ring.Size = init->ring_size; + + dev_priv->ring.virtual_start = drm_ioremap(dev->agp->base + + init->ring_start, + init->ring_size, dev); + + if (dev_priv->ring.virtual_start == NULL) { + dev->dev_private = (void *)dev_priv; + i830_dma_cleanup(dev); + DRM_ERROR("can not ioremap virtual address for" + " ring buffer\n"); + return -ENOMEM; + } + + dev_priv->ring.tail_mask = dev_priv->ring.Size - 1; + + dev_priv->w = init->w; + dev_priv->h = init->h; + dev_priv->pitch = init->pitch; + dev_priv->back_offset = init->back_offset; + dev_priv->depth_offset = init->depth_offset; + dev_priv->front_offset = init->front_offset; + + dev_priv->front_di1 = init->front_offset | init->pitch_bits; + dev_priv->back_di1 = init->back_offset | init->pitch_bits; + dev_priv->zi1 = init->depth_offset | init->pitch_bits; + + DRM_DEBUG("front_di1 %x\n", dev_priv->front_di1); + DRM_DEBUG("back_offset %x\n", dev_priv->back_offset); + DRM_DEBUG("back_di1 %x\n", dev_priv->back_di1); + DRM_DEBUG("pitch_bits %x\n", init->pitch_bits); + + dev_priv->cpp = init->cpp; + /* We are using separate values as placeholders for mechanisms for + * private backbuffer/depthbuffer usage. 
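+	 *
+	 * Worked example of the di1 packing above (values hypothetical):
+	 * with back_offset == 0x00800000 and pitch_bits == 0x4,
+	 * back_di1 == 0x00800000 | 0x4 == 0x00800004, the buffer address
+	 * and pitch code folded into one CMD_OP_DESTBUFFER_INFO dword.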
+ */ + + dev_priv->back_pitch = init->back_pitch; + dev_priv->depth_pitch = init->depth_pitch; + dev_priv->do_boxes = 0; + dev_priv->use_mi_batchbuffer_start = 0; + + /* Program Hardware Status Page */ + dev_priv->hw_status_page = + pci_alloc_consistent(dev->pdev, PAGE_SIZE, + &dev_priv->dma_status_page); + if (!dev_priv->hw_status_page) { + dev->dev_private = (void *)dev_priv; + i830_dma_cleanup(dev); + DRM_ERROR("Can not allocate hardware status page\n"); + return -ENOMEM; + } + memset(dev_priv->hw_status_page, 0, PAGE_SIZE); + DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page); + + I830_WRITE(0x02080, dev_priv->dma_status_page); + DRM_DEBUG("Enabled hardware status page\n"); + + /* Now we need to init our freelist */ + if (i830_freelist_init(dev, dev_priv) != 0) { + dev->dev_private = (void *)dev_priv; + i830_dma_cleanup(dev); + DRM_ERROR("Not enough space in the status page for" + " the freelist\n"); + return -ENOMEM; + } + dev->dev_private = (void *)dev_priv; + + return 0; +} + +static int i830_dma_init(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->head->dev; + drm_i830_private_t *dev_priv; + drm_i830_init_t init; + int retcode = 0; + + if (copy_from_user(&init, (void *__user)arg, sizeof(init))) + return -EFAULT; + + switch (init.func) { + case I830_INIT_DMA: + dev_priv = drm_alloc(sizeof(drm_i830_private_t), + DRM_MEM_DRIVER); + if (dev_priv == NULL) + return -ENOMEM; + retcode = i830_dma_initialize(dev, dev_priv, &init); + break; + case I830_CLEANUP_DMA: + retcode = i830_dma_cleanup(dev); + break; + default: + retcode = -EINVAL; + break; + } + + return retcode; +} + +#define GFX_OP_STIPPLE ((0x3<<29)|(0x1d<<24)|(0x83<<16)) +#define ST1_ENABLE (1<<16) +#define ST1_MASK (0xffff) + +/* Most efficient way to verify state for the i830 is as it is + * emitted. Non-conformant state is silently dropped. 
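+ *
+ * Concretely (restating the checks below, for illustration): a client
+ * dword is copied to the ring only if
+ *
+ *	(tmp & (7 << 29)) == CMD_3D &&
+ *	(tmp & (0x1f << 24)) < (0x1d << 24)
+ *
+ * i.e. it decodes as a 3D client opcode outside the 0x1d group; any
+ * other dword is skipped with a DRM_ERROR.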
+ */ +static void i830EmitContextVerified(drm_device_t * dev, unsigned int *code) +{ + drm_i830_private_t *dev_priv = dev->dev_private; + int i, j = 0; + unsigned int tmp; + RING_LOCALS; + + BEGIN_LP_RING(I830_CTX_SETUP_SIZE + 4); + + for (i = 0; i < I830_CTXREG_BLENDCOLR0; i++) { + tmp = code[i]; + if ((tmp & (7 << 29)) == CMD_3D && + (tmp & (0x1f << 24)) < (0x1d << 24)) { + OUT_RING(tmp); + j++; + } else { + DRM_ERROR("Skipping %d\n", i); + } + } + + OUT_RING(STATE3D_CONST_BLEND_COLOR_CMD); + OUT_RING(code[I830_CTXREG_BLENDCOLR]); + j += 2; + + for (i = I830_CTXREG_VF; i < I830_CTXREG_MCSB0; i++) { + tmp = code[i]; + if ((tmp & (7 << 29)) == CMD_3D && + (tmp & (0x1f << 24)) < (0x1d << 24)) { + OUT_RING(tmp); + j++; + } else { + DRM_ERROR("Skipping %d\n", i); + } + } + + OUT_RING(STATE3D_MAP_COORD_SETBIND_CMD); + OUT_RING(code[I830_CTXREG_MCSB1]); + j += 2; + + if (j & 1) + OUT_RING(0); + + ADVANCE_LP_RING(); +} + +static void i830EmitTexVerified(drm_device_t * dev, unsigned int *code) +{ + drm_i830_private_t *dev_priv = dev->dev_private; + int i, j = 0; + unsigned int tmp; + RING_LOCALS; + + if (code[I830_TEXREG_MI0] == GFX_OP_MAP_INFO || + (code[I830_TEXREG_MI0] & ~(0xf * LOAD_TEXTURE_MAP0)) == + (STATE3D_LOAD_STATE_IMMEDIATE_2 | 4)) { + + BEGIN_LP_RING(I830_TEX_SETUP_SIZE); + + OUT_RING(code[I830_TEXREG_MI0]); /* TM0LI */ + OUT_RING(code[I830_TEXREG_MI1]); /* TM0S0 */ + OUT_RING(code[I830_TEXREG_MI2]); /* TM0S1 */ + OUT_RING(code[I830_TEXREG_MI3]); /* TM0S2 */ + OUT_RING(code[I830_TEXREG_MI4]); /* TM0S3 */ + OUT_RING(code[I830_TEXREG_MI5]); /* TM0S4 */ + + for (i = 6; i < I830_TEX_SETUP_SIZE; i++) { + tmp = code[i]; + OUT_RING(tmp); + j++; + } + + if (j & 1) + OUT_RING(0); + + ADVANCE_LP_RING(); + } else + printk("rejected packet %x\n", code[0]); +} + +static void i830EmitTexBlendVerified(drm_device_t * dev, + unsigned int *code, unsigned int num) +{ + drm_i830_private_t *dev_priv = dev->dev_private; + int i, j = 0; + unsigned int tmp; + RING_LOCALS; + + if (!num) + return; + + BEGIN_LP_RING(num + 1); + + for (i = 0; i < num; i++) { + tmp = code[i]; + OUT_RING(tmp); + j++; + } + + if (j & 1) + OUT_RING(0); + + ADVANCE_LP_RING(); +} + +static void i830EmitTexPalette(drm_device_t * dev, + unsigned int *palette, int number, int is_shared) +{ + drm_i830_private_t *dev_priv = dev->dev_private; + int i; + RING_LOCALS; + + return; + + BEGIN_LP_RING(258); + + if (is_shared == 1) { + OUT_RING(CMD_OP_MAP_PALETTE_LOAD | + MAP_PALETTE_NUM(0) | MAP_PALETTE_BOTH); + } else { + OUT_RING(CMD_OP_MAP_PALETTE_LOAD | MAP_PALETTE_NUM(number)); + } + for (i = 0; i < 256; i++) { + OUT_RING(palette[i]); + } + OUT_RING(0); + /* KW: WHERE IS THE ADVANCE_LP_RING? This is effectively a noop! + */ +} + +/* Need to do some additional checking when setting the dest buffer. 
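+ * In particular, the code below accepts a colour-buffer address only
+ * if it equals the front_di1 or back_di1 value computed at init time;
+ * anything else is rejected with "bad di1", so a client cannot aim
+ * the blitter at an arbitrary offset.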
+ */ +static void i830EmitDestVerified(drm_device_t * dev, unsigned int *code) +{ + drm_i830_private_t *dev_priv = dev->dev_private; + unsigned int tmp; + RING_LOCALS; + + BEGIN_LP_RING(I830_DEST_SETUP_SIZE + 10); + + tmp = code[I830_DESTREG_CBUFADDR]; + if (tmp == dev_priv->front_di1 || tmp == dev_priv->back_di1) { + if (((int)outring) & 8) { + OUT_RING(0); + OUT_RING(0); + } + + OUT_RING(CMD_OP_DESTBUFFER_INFO); + OUT_RING(BUF_3D_ID_COLOR_BACK | + BUF_3D_PITCH(dev_priv->back_pitch * dev_priv->cpp) | + BUF_3D_USE_FENCE); + OUT_RING(tmp); + OUT_RING(0); + + OUT_RING(CMD_OP_DESTBUFFER_INFO); + OUT_RING(BUF_3D_ID_DEPTH | BUF_3D_USE_FENCE | + BUF_3D_PITCH(dev_priv->depth_pitch * dev_priv->cpp)); + OUT_RING(dev_priv->zi1); + OUT_RING(0); + } else { + DRM_ERROR("bad di1 %x (allow %x or %x)\n", + tmp, dev_priv->front_di1, dev_priv->back_di1); + } + + /* invarient: + */ + + OUT_RING(GFX_OP_DESTBUFFER_VARS); + OUT_RING(code[I830_DESTREG_DV1]); + + OUT_RING(GFX_OP_DRAWRECT_INFO); + OUT_RING(code[I830_DESTREG_DR1]); + OUT_RING(code[I830_DESTREG_DR2]); + OUT_RING(code[I830_DESTREG_DR3]); + OUT_RING(code[I830_DESTREG_DR4]); + + /* Need to verify this */ + tmp = code[I830_DESTREG_SENABLE]; + if ((tmp & ~0x3) == GFX_OP_SCISSOR_ENABLE) { + OUT_RING(tmp); + } else { + DRM_ERROR("bad scissor enable\n"); + OUT_RING(0); + } + + OUT_RING(GFX_OP_SCISSOR_RECT); + OUT_RING(code[I830_DESTREG_SR1]); + OUT_RING(code[I830_DESTREG_SR2]); + OUT_RING(0); + + ADVANCE_LP_RING(); +} + +static void i830EmitStippleVerified(drm_device_t * dev, unsigned int *code) +{ + drm_i830_private_t *dev_priv = dev->dev_private; + RING_LOCALS; + + BEGIN_LP_RING(2); + OUT_RING(GFX_OP_STIPPLE); + OUT_RING(code[1]); + ADVANCE_LP_RING(); +} + +static void i830EmitState(drm_device_t * dev) +{ + drm_i830_private_t *dev_priv = dev->dev_private; + drm_i830_sarea_t *sarea_priv = dev_priv->sarea_priv; + unsigned int dirty = sarea_priv->dirty; + + DRM_DEBUG("%s %x\n", __FUNCTION__, dirty); + + if (dirty & I830_UPLOAD_BUFFERS) { + i830EmitDestVerified(dev, sarea_priv->BufferState); + sarea_priv->dirty &= ~I830_UPLOAD_BUFFERS; + } + + if (dirty & I830_UPLOAD_CTX) { + i830EmitContextVerified(dev, sarea_priv->ContextState); + sarea_priv->dirty &= ~I830_UPLOAD_CTX; + } + + if (dirty & I830_UPLOAD_TEX0) { + i830EmitTexVerified(dev, sarea_priv->TexState[0]); + sarea_priv->dirty &= ~I830_UPLOAD_TEX0; + } + + if (dirty & I830_UPLOAD_TEX1) { + i830EmitTexVerified(dev, sarea_priv->TexState[1]); + sarea_priv->dirty &= ~I830_UPLOAD_TEX1; + } + + if (dirty & I830_UPLOAD_TEXBLEND0) { + i830EmitTexBlendVerified(dev, sarea_priv->TexBlendState[0], + sarea_priv->TexBlendStateWordsUsed[0]); + sarea_priv->dirty &= ~I830_UPLOAD_TEXBLEND0; + } + + if (dirty & I830_UPLOAD_TEXBLEND1) { + i830EmitTexBlendVerified(dev, sarea_priv->TexBlendState[1], + sarea_priv->TexBlendStateWordsUsed[1]); + sarea_priv->dirty &= ~I830_UPLOAD_TEXBLEND1; + } + + if (dirty & I830_UPLOAD_TEX_PALETTE_SHARED) { + i830EmitTexPalette(dev, sarea_priv->Palette[0], 0, 1); + } else { + if (dirty & I830_UPLOAD_TEX_PALETTE_N(0)) { + i830EmitTexPalette(dev, sarea_priv->Palette[0], 0, 0); + sarea_priv->dirty &= ~I830_UPLOAD_TEX_PALETTE_N(0); + } + if (dirty & I830_UPLOAD_TEX_PALETTE_N(1)) { + i830EmitTexPalette(dev, sarea_priv->Palette[1], 1, 0); + sarea_priv->dirty &= ~I830_UPLOAD_TEX_PALETTE_N(1); + } + + /* 1.3: + */ +#if 0 + if (dirty & I830_UPLOAD_TEX_PALETTE_N(2)) { + i830EmitTexPalette(dev, sarea_priv->Palette2[0], 0, 0); + sarea_priv->dirty &= ~I830_UPLOAD_TEX_PALETTE_N(2); + } + if (dirty & 
I830_UPLOAD_TEX_PALETTE_N(3)) { + i830EmitTexPalette(dev, sarea_priv->Palette2[1], 1, 0); + sarea_priv->dirty &= ~I830_UPLOAD_TEX_PALETTE_N(2); + } +#endif + } + + /* 1.3: + */ + if (dirty & I830_UPLOAD_STIPPLE) { + i830EmitStippleVerified(dev, sarea_priv->StippleState); + sarea_priv->dirty &= ~I830_UPLOAD_STIPPLE; + } + + if (dirty & I830_UPLOAD_TEX2) { + i830EmitTexVerified(dev, sarea_priv->TexState2); + sarea_priv->dirty &= ~I830_UPLOAD_TEX2; + } + + if (dirty & I830_UPLOAD_TEX3) { + i830EmitTexVerified(dev, sarea_priv->TexState3); + sarea_priv->dirty &= ~I830_UPLOAD_TEX3; + } + + if (dirty & I830_UPLOAD_TEXBLEND2) { + i830EmitTexBlendVerified(dev, + sarea_priv->TexBlendState2, + sarea_priv->TexBlendStateWordsUsed2); + + sarea_priv->dirty &= ~I830_UPLOAD_TEXBLEND2; + } + + if (dirty & I830_UPLOAD_TEXBLEND3) { + i830EmitTexBlendVerified(dev, + sarea_priv->TexBlendState3, + sarea_priv->TexBlendStateWordsUsed3); + sarea_priv->dirty &= ~I830_UPLOAD_TEXBLEND3; + } +} + +/* ================================================================ + * Performance monitoring functions + */ + +static void i830_fill_box(drm_device_t * dev, + int x, int y, int w, int h, int r, int g, int b) +{ + drm_i830_private_t *dev_priv = dev->dev_private; + u32 color; + unsigned int BR13, CMD; + RING_LOCALS; + + BR13 = (0xF0 << 16) | (dev_priv->pitch * dev_priv->cpp) | (1 << 24); + CMD = XY_COLOR_BLT_CMD; + x += dev_priv->sarea_priv->boxes[0].x1; + y += dev_priv->sarea_priv->boxes[0].y1; + + if (dev_priv->cpp == 4) { + BR13 |= (1 << 25); + CMD |= (XY_COLOR_BLT_WRITE_ALPHA | XY_COLOR_BLT_WRITE_RGB); + color = (((0xff) << 24) | (r << 16) | (g << 8) | b); + } else { + color = (((r & 0xf8) << 8) | + ((g & 0xfc) << 3) | ((b & 0xf8) >> 3)); + } + + BEGIN_LP_RING(6); + OUT_RING(CMD); + OUT_RING(BR13); + OUT_RING((y << 16) | x); + OUT_RING(((y + h) << 16) | (x + w)); + + if (dev_priv->current_page == 1) { + OUT_RING(dev_priv->front_offset); + } else { + OUT_RING(dev_priv->back_offset); + } + + OUT_RING(color); + ADVANCE_LP_RING(); +} + +static void i830_cp_performance_boxes(drm_device_t * dev) +{ + drm_i830_private_t *dev_priv = dev->dev_private; + + /* Purple box for page flipping + */ + if (dev_priv->sarea_priv->perf_boxes & I830_BOX_FLIP) + i830_fill_box(dev, 4, 4, 8, 8, 255, 0, 255); + + /* Red box if we have to wait for idle at any point + */ + if (dev_priv->sarea_priv->perf_boxes & I830_BOX_WAIT) + i830_fill_box(dev, 16, 4, 8, 8, 255, 0, 0); + + /* Blue box: lost context? 
+ */ + if (dev_priv->sarea_priv->perf_boxes & I830_BOX_LOST_CONTEXT) + i830_fill_box(dev, 28, 4, 8, 8, 0, 0, 255); + + /* Yellow box for texture swaps + */ + if (dev_priv->sarea_priv->perf_boxes & I830_BOX_TEXTURE_LOAD) + i830_fill_box(dev, 40, 4, 8, 8, 255, 255, 0); + + /* Green box if hardware never idles (as far as we can tell) + */ + if (!(dev_priv->sarea_priv->perf_boxes & I830_BOX_RING_EMPTY)) + i830_fill_box(dev, 64, 4, 8, 8, 0, 255, 0); + + /* Draw bars indicating number of buffers allocated + * (not a great measure, easily confused) + */ + if (dev_priv->dma_used) { + int bar = dev_priv->dma_used / 10240; + if (bar > 100) + bar = 100; + if (bar < 1) + bar = 1; + i830_fill_box(dev, 4, 16, bar, 4, 196, 128, 128); + dev_priv->dma_used = 0; + } + + dev_priv->sarea_priv->perf_boxes = 0; +} + +static void i830_dma_dispatch_clear(drm_device_t * dev, int flags, + unsigned int clear_color, + unsigned int clear_zval, + unsigned int clear_depthmask) +{ + drm_i830_private_t *dev_priv = dev->dev_private; + drm_i830_sarea_t *sarea_priv = dev_priv->sarea_priv; + int nbox = sarea_priv->nbox; + drm_clip_rect_t *pbox = sarea_priv->boxes; + int pitch = dev_priv->pitch; + int cpp = dev_priv->cpp; + int i; + unsigned int BR13, CMD, D_CMD; + RING_LOCALS; + + if (dev_priv->current_page == 1) { + unsigned int tmp = flags; + + flags &= ~(I830_FRONT | I830_BACK); + if (tmp & I830_FRONT) + flags |= I830_BACK; + if (tmp & I830_BACK) + flags |= I830_FRONT; + } + + i830_kernel_lost_context(dev); + + switch (cpp) { + case 2: + BR13 = (0xF0 << 16) | (pitch * cpp) | (1 << 24); + D_CMD = CMD = XY_COLOR_BLT_CMD; + break; + case 4: + BR13 = (0xF0 << 16) | (pitch * cpp) | (1 << 24) | (1 << 25); + CMD = (XY_COLOR_BLT_CMD | XY_COLOR_BLT_WRITE_ALPHA | + XY_COLOR_BLT_WRITE_RGB); + D_CMD = XY_COLOR_BLT_CMD; + if (clear_depthmask & 0x00ffffff) + D_CMD |= XY_COLOR_BLT_WRITE_RGB; + if (clear_depthmask & 0xff000000) + D_CMD |= XY_COLOR_BLT_WRITE_ALPHA; + break; + default: + BR13 = (0xF0 << 16) | (pitch * cpp) | (1 << 24); + D_CMD = CMD = XY_COLOR_BLT_CMD; + break; + } + + if (nbox > I830_NR_SAREA_CLIPRECTS) + nbox = I830_NR_SAREA_CLIPRECTS; + + for (i = 0; i < nbox; i++, pbox++) { + if (pbox->x1 > pbox->x2 || + pbox->y1 > pbox->y2 || + pbox->x2 > dev_priv->w || pbox->y2 > dev_priv->h) + continue; + + if (flags & I830_FRONT) { + DRM_DEBUG("clear front\n"); + BEGIN_LP_RING(6); + OUT_RING(CMD); + OUT_RING(BR13); + OUT_RING((pbox->y1 << 16) | pbox->x1); + OUT_RING((pbox->y2 << 16) | pbox->x2); + OUT_RING(dev_priv->front_offset); + OUT_RING(clear_color); + ADVANCE_LP_RING(); + } + + if (flags & I830_BACK) { + DRM_DEBUG("clear back\n"); + BEGIN_LP_RING(6); + OUT_RING(CMD); + OUT_RING(BR13); + OUT_RING((pbox->y1 << 16) | pbox->x1); + OUT_RING((pbox->y2 << 16) | pbox->x2); + OUT_RING(dev_priv->back_offset); + OUT_RING(clear_color); + ADVANCE_LP_RING(); + } + + if (flags & I830_DEPTH) { + DRM_DEBUG("clear depth\n"); + BEGIN_LP_RING(6); + OUT_RING(D_CMD); + OUT_RING(BR13); + OUT_RING((pbox->y1 << 16) | pbox->x1); + OUT_RING((pbox->y2 << 16) | pbox->x2); + OUT_RING(dev_priv->depth_offset); + OUT_RING(clear_zval); + ADVANCE_LP_RING(); + } + } +} + +static void i830_dma_dispatch_swap(drm_device_t * dev) +{ + drm_i830_private_t *dev_priv = dev->dev_private; + drm_i830_sarea_t *sarea_priv = dev_priv->sarea_priv; + int nbox = sarea_priv->nbox; + drm_clip_rect_t *pbox = sarea_priv->boxes; + int pitch = dev_priv->pitch; + int cpp = dev_priv->cpp; + int i; + unsigned int CMD, BR13; + RING_LOCALS; + + DRM_DEBUG("swapbuffers\n"); + + 
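/* Editor's note, a hedged summary of the blits below: one
+ * XY_SRC_COPY_BLT is emitted per cliprect, and current_page selects the
+ * destination so the copy always lands in whichever of the front/back
+ * offsets is currently scanned out, with the other buffer as source.
+ */
+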
i830_kernel_lost_context(dev); + + if (dev_priv->do_boxes) + i830_cp_performance_boxes(dev); + + switch (cpp) { + case 2: + BR13 = (pitch * cpp) | (0xCC << 16) | (1 << 24); + CMD = XY_SRC_COPY_BLT_CMD; + break; + case 4: + BR13 = (pitch * cpp) | (0xCC << 16) | (1 << 24) | (1 << 25); + CMD = (XY_SRC_COPY_BLT_CMD | XY_SRC_COPY_BLT_WRITE_ALPHA | + XY_SRC_COPY_BLT_WRITE_RGB); + break; + default: + BR13 = (pitch * cpp) | (0xCC << 16) | (1 << 24); + CMD = XY_SRC_COPY_BLT_CMD; + break; + } + + if (nbox > I830_NR_SAREA_CLIPRECTS) + nbox = I830_NR_SAREA_CLIPRECTS; + + for (i = 0; i < nbox; i++, pbox++) { + if (pbox->x1 > pbox->x2 || + pbox->y1 > pbox->y2 || + pbox->x2 > dev_priv->w || pbox->y2 > dev_priv->h) + continue; + + DRM_DEBUG("dispatch swap %d,%d-%d,%d!\n", + pbox->x1, pbox->y1, pbox->x2, pbox->y2); + + BEGIN_LP_RING(8); + OUT_RING(CMD); + OUT_RING(BR13); + OUT_RING((pbox->y1 << 16) | pbox->x1); + OUT_RING((pbox->y2 << 16) | pbox->x2); + + if (dev_priv->current_page == 0) + OUT_RING(dev_priv->front_offset); + else + OUT_RING(dev_priv->back_offset); + + OUT_RING((pbox->y1 << 16) | pbox->x1); + OUT_RING(BR13 & 0xffff); + + if (dev_priv->current_page == 0) + OUT_RING(dev_priv->back_offset); + else + OUT_RING(dev_priv->front_offset); + + ADVANCE_LP_RING(); + } +} + +static void i830_dma_dispatch_flip(drm_device_t * dev) +{ + drm_i830_private_t *dev_priv = dev->dev_private; + RING_LOCALS; + + DRM_DEBUG("%s: page=%d pfCurrentPage=%d\n", + __FUNCTION__, + dev_priv->current_page, + dev_priv->sarea_priv->pf_current_page); + + i830_kernel_lost_context(dev); + + if (dev_priv->do_boxes) { + dev_priv->sarea_priv->perf_boxes |= I830_BOX_FLIP; + i830_cp_performance_boxes(dev); + } + + BEGIN_LP_RING(2); + OUT_RING(INST_PARSER_CLIENT | INST_OP_FLUSH | INST_FLUSH_MAP_CACHE); + OUT_RING(0); + ADVANCE_LP_RING(); + + BEGIN_LP_RING(6); + OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP); + OUT_RING(0); + if (dev_priv->current_page == 0) { + OUT_RING(dev_priv->back_offset); + dev_priv->current_page = 1; + } else { + OUT_RING(dev_priv->front_offset); + dev_priv->current_page = 0; + } + OUT_RING(0); + ADVANCE_LP_RING(); + + BEGIN_LP_RING(2); + OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP); + OUT_RING(0); + ADVANCE_LP_RING(); + + dev_priv->sarea_priv->pf_current_page = dev_priv->current_page; +} + +static void i830_dma_dispatch_vertex(drm_device_t * dev, + drm_buf_t * buf, int discard, int used) +{ + drm_i830_private_t *dev_priv = dev->dev_private; + drm_i830_buf_priv_t *buf_priv = buf->dev_private; + drm_i830_sarea_t *sarea_priv = dev_priv->sarea_priv; + drm_clip_rect_t *box = sarea_priv->boxes; + int nbox = sarea_priv->nbox; + unsigned long address = (unsigned long)buf->bus_address; + unsigned long start = address - dev->agp->base; + int i = 0, u; + RING_LOCALS; + + i830_kernel_lost_context(dev); + + if (nbox > I830_NR_SAREA_CLIPRECTS) + nbox = I830_NR_SAREA_CLIPRECTS; + + if (discard) { + u = cmpxchg(buf_priv->in_use, I830_BUF_CLIENT, + I830_BUF_HARDWARE); + if (u != I830_BUF_CLIENT) { + DRM_DEBUG("xxxx 2\n"); + } + } + + if (used > 4 * 1023) + used = 0; + + if (sarea_priv->dirty) + i830EmitState(dev); + + DRM_DEBUG("dispatch vertex addr 0x%lx, used 0x%x nbox %d\n", + address, used, nbox); + + dev_priv->counter++; + DRM_DEBUG("dispatch counter : %ld\n", dev_priv->counter); + DRM_DEBUG("i830_dma_dispatch\n"); + DRM_DEBUG("start : %lx\n", start); + DRM_DEBUG("used : %d\n", used); + DRM_DEBUG("start + used - 4 : %ld\n", start + used - 4); + + if (buf_priv->currently_mapped == I830_BUF_MAPPED) { + u32 *vp = 
buf_priv->kernel_virtual; + + vp[0] = (GFX_OP_PRIMITIVE | + sarea_priv->vertex_prim | ((used / 4) - 2)); + + if (dev_priv->use_mi_batchbuffer_start) { + vp[used / 4] = MI_BATCH_BUFFER_END; + used += 4; + } + + if (used & 4) { + vp[used / 4] = 0; + used += 4; + } + + i830_unmap_buffer(buf); + } + + if (used) { + do { + if (i < nbox) { + BEGIN_LP_RING(6); + OUT_RING(GFX_OP_DRAWRECT_INFO); + OUT_RING(sarea_priv-> + BufferState[I830_DESTREG_DR1]); + OUT_RING(box[i].x1 | (box[i].y1 << 16)); + OUT_RING(box[i].x2 | (box[i].y2 << 16)); + OUT_RING(sarea_priv-> + BufferState[I830_DESTREG_DR4]); + OUT_RING(0); + ADVANCE_LP_RING(); + } + + if (dev_priv->use_mi_batchbuffer_start) { + BEGIN_LP_RING(2); + OUT_RING(MI_BATCH_BUFFER_START | (2 << 6)); + OUT_RING(start | MI_BATCH_NON_SECURE); + ADVANCE_LP_RING(); + } else { + BEGIN_LP_RING(4); + OUT_RING(MI_BATCH_BUFFER); + OUT_RING(start | MI_BATCH_NON_SECURE); + OUT_RING(start + used - 4); + OUT_RING(0); + ADVANCE_LP_RING(); + } + + } while (++i < nbox); + } + + if (discard) { + dev_priv->counter++; + + (void)cmpxchg(buf_priv->in_use, I830_BUF_CLIENT, + I830_BUF_HARDWARE); + + BEGIN_LP_RING(8); + OUT_RING(CMD_STORE_DWORD_IDX); + OUT_RING(20); + OUT_RING(dev_priv->counter); + OUT_RING(CMD_STORE_DWORD_IDX); + OUT_RING(buf_priv->my_use_idx); + OUT_RING(I830_BUF_FREE); + OUT_RING(CMD_REPORT_HEAD); + OUT_RING(0); + ADVANCE_LP_RING(); + } +} + +static void i830_dma_quiescent(drm_device_t * dev) +{ + drm_i830_private_t *dev_priv = dev->dev_private; + RING_LOCALS; + + i830_kernel_lost_context(dev); + + BEGIN_LP_RING(4); + OUT_RING(INST_PARSER_CLIENT | INST_OP_FLUSH | INST_FLUSH_MAP_CACHE); + OUT_RING(CMD_REPORT_HEAD); + OUT_RING(0); + OUT_RING(0); + ADVANCE_LP_RING(); + + i830_wait_ring(dev, dev_priv->ring.Size - 8, __FUNCTION__); +} + +static int i830_flush_queue(drm_device_t * dev) +{ + drm_i830_private_t *dev_priv = dev->dev_private; + drm_device_dma_t *dma = dev->dma; + int i, ret = 0; + RING_LOCALS; + + i830_kernel_lost_context(dev); + + BEGIN_LP_RING(2); + OUT_RING(CMD_REPORT_HEAD); + OUT_RING(0); + ADVANCE_LP_RING(); + + i830_wait_ring(dev, dev_priv->ring.Size - 8, __FUNCTION__); + + for (i = 0; i < dma->buf_count; i++) { + drm_buf_t *buf = dma->buflist[i]; + drm_i830_buf_priv_t *buf_priv = buf->dev_private; + + int used = cmpxchg(buf_priv->in_use, I830_BUF_HARDWARE, + I830_BUF_FREE); + + if (used == I830_BUF_HARDWARE) + DRM_DEBUG("reclaimed from HARDWARE\n"); + if (used == I830_BUF_CLIENT) + DRM_DEBUG("still on client\n"); + } + + return ret; +} + +/* Must be called with the lock held */ +void i830_reclaim_buffers(drm_device_t *dev, struct file *filp) +{ + drm_device_dma_t *dma = dev->dma; + int i; + + if (!dma) + return; + if (!dev->dev_private) + return; + if (!dma->buflist) + return; + + i830_flush_queue(dev); + + for (i = 0; i < dma->buf_count; i++) { + drm_buf_t *buf = dma->buflist[i]; + drm_i830_buf_priv_t *buf_priv = buf->dev_private; + + if (buf->filp == filp && buf_priv) { + int used = cmpxchg(buf_priv->in_use, I830_BUF_CLIENT, + I830_BUF_FREE); + + if (used == I830_BUF_CLIENT) + DRM_DEBUG("reclaimed from client\n"); + if (buf_priv->currently_mapped == I830_BUF_MAPPED) + buf_priv->currently_mapped = I830_BUF_UNMAPPED; + } + } +} + +static int i830_flush_ioctl(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->head->dev; + + LOCK_TEST_WITH_RETURN(dev, filp); + + i830_flush_queue(dev); + return 0; +} + +static int i830_dma_vertex(struct inode *inode, 
struct file *filp, + unsigned int cmd, unsigned long arg) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->head->dev; + drm_device_dma_t *dma = dev->dma; + drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private; + u32 *hw_status = dev_priv->hw_status_page; + drm_i830_sarea_t *sarea_priv = (drm_i830_sarea_t *) + dev_priv->sarea_priv; + drm_i830_vertex_t vertex; + + if (copy_from_user + (&vertex, (drm_i830_vertex_t __user *) arg, sizeof(vertex))) + return -EFAULT; + + LOCK_TEST_WITH_RETURN(dev, filp); + + DRM_DEBUG("i830 dma vertex, idx %d used %d discard %d\n", + vertex.idx, vertex.used, vertex.discard); + + if (vertex.idx < 0 || vertex.idx > dma->buf_count) + return -EINVAL; + + i830_dma_dispatch_vertex(dev, + dma->buflist[vertex.idx], + vertex.discard, vertex.used); + + sarea_priv->last_enqueue = dev_priv->counter - 1; + sarea_priv->last_dispatch = (int)hw_status[5]; + + return 0; +} + +static int i830_clear_bufs(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->head->dev; + drm_i830_clear_t clear; + + if (copy_from_user + (&clear, (drm_i830_clear_t __user *) arg, sizeof(clear))) + return -EFAULT; + + LOCK_TEST_WITH_RETURN(dev, filp); + + /* GH: Someone's doing nasty things... */ + if (!dev->dev_private) { + return -EINVAL; + } + + i830_dma_dispatch_clear(dev, clear.flags, + clear.clear_color, + clear.clear_depth, clear.clear_depthmask); + return 0; +} + +static int i830_swap_bufs(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->head->dev; + + DRM_DEBUG("i830_swap_bufs\n"); + + LOCK_TEST_WITH_RETURN(dev, filp); + + i830_dma_dispatch_swap(dev); + return 0; +} + +/* Not sure why this isn't set all the time: + */ +static void i830_do_init_pageflip(drm_device_t * dev) +{ + drm_i830_private_t *dev_priv = dev->dev_private; + + DRM_DEBUG("%s\n", __FUNCTION__); + dev_priv->page_flipping = 1; + dev_priv->current_page = 0; + dev_priv->sarea_priv->pf_current_page = dev_priv->current_page; +} + +static int i830_do_cleanup_pageflip(drm_device_t * dev) +{ + drm_i830_private_t *dev_priv = dev->dev_private; + + DRM_DEBUG("%s\n", __FUNCTION__); + if (dev_priv->current_page != 0) + i830_dma_dispatch_flip(dev); + + dev_priv->page_flipping = 0; + return 0; +} + +static int i830_flip_bufs(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->head->dev; + drm_i830_private_t *dev_priv = dev->dev_private; + + DRM_DEBUG("%s\n", __FUNCTION__); + + LOCK_TEST_WITH_RETURN(dev, filp); + + if (!dev_priv->page_flipping) + i830_do_init_pageflip(dev); + + i830_dma_dispatch_flip(dev); + return 0; +} + +static int i830_getage(struct inode *inode, struct file *filp, unsigned int cmd, + unsigned long arg) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->head->dev; + drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private; + u32 *hw_status = dev_priv->hw_status_page; + drm_i830_sarea_t *sarea_priv = (drm_i830_sarea_t *) + dev_priv->sarea_priv; + + sarea_priv->last_dispatch = (int)hw_status[5]; + return 0; +} + +static int i830_getbuf(struct inode *inode, struct file *filp, unsigned int cmd, + unsigned long arg) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->head->dev; + int retcode = 0; + drm_i830_dma_t d; + drm_i830_private_t 
*dev_priv = (drm_i830_private_t *) dev->dev_private;
+ u32 *hw_status = dev_priv->hw_status_page;
+ drm_i830_sarea_t *sarea_priv = (drm_i830_sarea_t *)
+ dev_priv->sarea_priv;
+
+ DRM_DEBUG("getbuf\n");
+ if (copy_from_user(&d, (drm_i830_dma_t __user *) arg, sizeof(d)))
+ return -EFAULT;
+
+ LOCK_TEST_WITH_RETURN(dev, filp);
+
+ d.granted = 0;
+
+ retcode = i830_dma_get_buffer(dev, &d, filp);
+
+ DRM_DEBUG("i830_dma: %d returning %d, granted = %d\n",
+ current->pid, retcode, d.granted);
+
+ if (copy_to_user((drm_dma_t __user *) arg, &d, sizeof(d)))
+ return -EFAULT;
+ sarea_priv->last_dispatch = (int)hw_status[5];
+
+ return retcode;
+}
+
+static int i830_copybuf(struct inode *inode,
+ struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ /* Never copy - 2.4.x doesn't need it */
+ return 0;
+}
+
+static int i830_docopy(struct inode *inode, struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ return 0;
+}
+
+static int i830_getparam(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->head->dev;
+ drm_i830_private_t *dev_priv = dev->dev_private;
+ drm_i830_getparam_t param;
+ int value;
+
+ if (!dev_priv) {
+ DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
+ return -EINVAL;
+ }
+
+ if (copy_from_user
+ (&param, (drm_i830_getparam_t __user *) arg, sizeof(param)))
+ return -EFAULT;
+
+ switch (param.param) {
+ case I830_PARAM_IRQ_ACTIVE:
+ value = dev->irq_enabled;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (copy_to_user(param.value, &value, sizeof(int))) {
+ DRM_ERROR("copy_to_user\n");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int i830_setparam(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->head->dev;
+ drm_i830_private_t *dev_priv = dev->dev_private;
+ drm_i830_setparam_t param;
+
+ if (!dev_priv) {
+ DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
+ return -EINVAL;
+ }
+
+ if (copy_from_user
+ (&param, (drm_i830_setparam_t __user *) arg, sizeof(param)))
+ return -EFAULT;
+
+ switch (param.param) {
+ case I830_SETPARAM_USE_MI_BATCHBUFFER_START:
+ dev_priv->use_mi_batchbuffer_start = param.value;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int i830_driver_load(drm_device_t *dev, unsigned long flags)
+{
+ /* i830 has 4 more counters */
+ dev->counters += 4;
+ dev->types[6] = _DRM_STAT_IRQ;
+ dev->types[7] = _DRM_STAT_PRIMARY;
+ dev->types[8] = _DRM_STAT_SECONDARY;
+ dev->types[9] = _DRM_STAT_DMA;
+
+ return 0;
+}
+
+void i830_driver_lastclose(drm_device_t * dev)
+{
+ i830_dma_cleanup(dev);
+}
+
+void i830_driver_preclose(drm_device_t * dev, DRMFILE filp)
+{
+ if (dev->dev_private) {
+ drm_i830_private_t *dev_priv = dev->dev_private;
+ if (dev_priv->page_flipping) {
+ i830_do_cleanup_pageflip(dev);
+ }
+ }
+}
+
+void i830_driver_reclaim_buffers_locked(drm_device_t * dev, struct file *filp)
+{
+ i830_reclaim_buffers(dev, filp);
+}
+
+int i830_driver_dma_quiescent(drm_device_t * dev)
+{
+ i830_dma_quiescent(dev);
+ return 0;
+}
+
+drm_ioctl_desc_t i830_ioctls[] = {
+ [DRM_IOCTL_NR(DRM_I830_INIT)] = {i830_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+ [DRM_IOCTL_NR(DRM_I830_VERTEX)] = {i830_dma_vertex, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_I830_CLEAR)] = {i830_clear_bufs, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_I830_FLUSH)] = {i830_flush_ioctl, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_I830_GETAGE)] = {i830_getage, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_I830_GETBUF)] = {i830_getbuf, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_I830_SWAP)] = {i830_swap_bufs, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_I830_COPY)] = {i830_copybuf, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_I830_DOCOPY)] = {i830_docopy, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_I830_FLIP)] = {i830_flip_bufs, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_I830_IRQ_EMIT)] = {i830_irq_emit, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_I830_IRQ_WAIT)] = {i830_irq_wait, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_I830_GETPARAM)] = {i830_getparam, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_I830_SETPARAM)] = {i830_setparam, DRM_AUTH}
+};
+
+int i830_max_ioctl = DRM_ARRAY_SIZE(i830_ioctls);
+
+/**
+ * Determine if the device really is AGP or not.
+ *
+ * All Intel graphics chipsets are treated as AGP, even if they are really
+ * PCI-e.
+ *
+ * \param dev The device to be tested.
+ *
+ * \returns
+ * A value of 1 is always returned to indicate that every i8xx is AGP.
+ */
+int i830_driver_device_is_agp(drm_device_t * dev)
+{
+ return 1;
+}
diff --git a/nx-X11/extras/drm/linux-core/i830_drm.h b/nx-X11/extras/drm/linux-core/i830_drm.h
new file mode 100644
index 000000000..7283f7940
--- /dev/null
+++ b/nx-X11/extras/drm/linux-core/i830_drm.h
@@ -0,0 +1,342 @@
+#ifndef _I830_DRM_H_
+#define _I830_DRM_H_
+
+/* WARNING: These defines must be the same as what the Xserver uses.
+ * If you change them, you must change the defines in the Xserver.
+ *
+ * KW: Actually, you can't ever change them because doing so would
+ * break backwards compatibility.
+ */
+
+#ifndef _I830_DEFINES_
+#define _I830_DEFINES_
+
+#define I830_DMA_BUF_ORDER 12
+#define I830_DMA_BUF_SZ (1<<I830_DMA_BUF_ORDER)
+#define I830_DMA_BUF_NR 256
+#define I830_NR_SAREA_CLIPRECTS 8
+
+/* Each region is a minimum of 64k, and there are at most 64 of them.
+ */
+#define I830_NR_TEX_REGIONS 64
+#define I830_LOG_MIN_TEX_REGION_SIZE 16
+
+/* KW: These aren't correct but someone set them to two and then
+ * released the module. Now we can't change them as doing so would
+ * break backwards compatibility.
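+ * (Editor's note, a hedged illustration of why: these counts size the
+ * TexState/TexBlendState arrays in drm_i830_sarea_t below, and the X
+ * server maps that struct by layout. Raising I830_TEXTURE_COUNT to 4
+ * would shift the offset of every later SAREA field, so old clients and
+ * a new kernel would silently disagree; instead, texunits 2 and 3 got
+ * the separate TexState2/TexState3 fields appended at the end.)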
+ */
+#define I830_TEXTURE_COUNT 2
+#define I830_TEXBLEND_COUNT I830_TEXTURE_COUNT
+
+#define I830_TEXBLEND_SIZE 12 /* (4 args + op) * 2 + COLOR_FACTOR */
+
+#define I830_UPLOAD_CTX 0x1
+#define I830_UPLOAD_BUFFERS 0x2
+#define I830_UPLOAD_CLIPRECTS 0x4
+#define I830_UPLOAD_TEX0_IMAGE 0x100 /* handled clientside */
+#define I830_UPLOAD_TEX0_CUBE 0x200 /* handled clientside */
+#define I830_UPLOAD_TEX1_IMAGE 0x400 /* handled clientside */
+#define I830_UPLOAD_TEX1_CUBE 0x800 /* handled clientside */
+#define I830_UPLOAD_TEX2_IMAGE 0x1000 /* handled clientside */
+#define I830_UPLOAD_TEX2_CUBE 0x2000 /* handled clientside */
+#define I830_UPLOAD_TEX3_IMAGE 0x4000 /* handled clientside */
+#define I830_UPLOAD_TEX3_CUBE 0x8000 /* handled clientside */
+#define I830_UPLOAD_TEX_N_IMAGE(n) (0x100 << (n * 2))
+#define I830_UPLOAD_TEX_N_CUBE(n) (0x200 << (n * 2))
+#define I830_UPLOAD_TEXIMAGE_MASK 0xff00
+#define I830_UPLOAD_TEX0 0x10000
+#define I830_UPLOAD_TEX1 0x20000
+#define I830_UPLOAD_TEX2 0x40000
+#define I830_UPLOAD_TEX3 0x80000
+#define I830_UPLOAD_TEX_N(n) (0x10000 << (n))
+#define I830_UPLOAD_TEX_MASK 0xf0000
+#define I830_UPLOAD_TEXBLEND0 0x100000
+#define I830_UPLOAD_TEXBLEND1 0x200000
+#define I830_UPLOAD_TEXBLEND2 0x400000
+#define I830_UPLOAD_TEXBLEND3 0x800000
+#define I830_UPLOAD_TEXBLEND_N(n) (0x100000 << (n))
+#define I830_UPLOAD_TEXBLEND_MASK 0xf00000
+#define I830_UPLOAD_TEX_PALETTE_N(n) (0x1000000 << (n))
+#define I830_UPLOAD_TEX_PALETTE_SHARED 0x4000000
+#define I830_UPLOAD_STIPPLE 0x8000000
+
+/* Indices into buf.Setup where various bits of state are mirrored per
+ * context and per buffer. These can be fired at the card as a unit,
+ * or in a piecewise fashion as required.
+ */
+
+/* Destbuffer state
+ * - backbuffer linear offset and pitch -- invariant in the current dri
+ * - zbuffer linear offset and pitch -- also invariant
+ * - drawing origin in back and depth buffers.
+ *
+ * Keep the depth/back buffer state here to accommodate private buffers
+ * in the future.
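+ * (Editor's note, a sketch of the flow rather than a new interface: the
+ * client writes these words into BufferState[] in the SAREA, sets
+ * I830_UPLOAD_BUFFERS in the dirty mask, and i830EmitDestVerified() in
+ * i830_dma.c earlier in this patch replays them, accepting only a
+ * CBUFADDR matching the known front/back di1 values and a well-formed
+ * GFX_OP_SCISSOR_ENABLE word before they reach the ring.)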
+ */ + +#define I830_DESTREG_CBUFADDR 0 +#define I830_DESTREG_DBUFADDR 1 +#define I830_DESTREG_DV0 2 +#define I830_DESTREG_DV1 3 +#define I830_DESTREG_SENABLE 4 +#define I830_DESTREG_SR0 5 +#define I830_DESTREG_SR1 6 +#define I830_DESTREG_SR2 7 +#define I830_DESTREG_DR0 8 +#define I830_DESTREG_DR1 9 +#define I830_DESTREG_DR2 10 +#define I830_DESTREG_DR3 11 +#define I830_DESTREG_DR4 12 +#define I830_DEST_SETUP_SIZE 13 + +/* Context state + */ +#define I830_CTXREG_STATE1 0 +#define I830_CTXREG_STATE2 1 +#define I830_CTXREG_STATE3 2 +#define I830_CTXREG_STATE4 3 +#define I830_CTXREG_STATE5 4 +#define I830_CTXREG_IALPHAB 5 +#define I830_CTXREG_STENCILTST 6 +#define I830_CTXREG_ENABLES_1 7 +#define I830_CTXREG_ENABLES_2 8 +#define I830_CTXREG_AA 9 +#define I830_CTXREG_FOGCOLOR 10 +#define I830_CTXREG_BLENDCOLR0 11 +#define I830_CTXREG_BLENDCOLR 12 /* Dword 1 of 2 dword command */ +#define I830_CTXREG_VF 13 +#define I830_CTXREG_VF2 14 +#define I830_CTXREG_MCSB0 15 +#define I830_CTXREG_MCSB1 16 +#define I830_CTX_SETUP_SIZE 17 + +/* 1.3: Stipple state + */ +#define I830_STPREG_ST0 0 +#define I830_STPREG_ST1 1 +#define I830_STP_SETUP_SIZE 2 + +/* Texture state (per tex unit) + */ + +#define I830_TEXREG_MI0 0 /* GFX_OP_MAP_INFO (6 dwords) */ +#define I830_TEXREG_MI1 1 +#define I830_TEXREG_MI2 2 +#define I830_TEXREG_MI3 3 +#define I830_TEXREG_MI4 4 +#define I830_TEXREG_MI5 5 +#define I830_TEXREG_MF 6 /* GFX_OP_MAP_FILTER */ +#define I830_TEXREG_MLC 7 /* GFX_OP_MAP_LOD_CTL */ +#define I830_TEXREG_MLL 8 /* GFX_OP_MAP_LOD_LIMITS */ +#define I830_TEXREG_MCS 9 /* GFX_OP_MAP_COORD_SETS */ +#define I830_TEX_SETUP_SIZE 10 + +#define I830_TEXREG_TM0LI 0 /* load immediate 2 texture map n */ +#define I830_TEXREG_TM0S0 1 +#define I830_TEXREG_TM0S1 2 +#define I830_TEXREG_TM0S2 3 +#define I830_TEXREG_TM0S3 4 +#define I830_TEXREG_TM0S4 5 +#define I830_TEXREG_NOP0 6 /* noop */ +#define I830_TEXREG_NOP1 7 /* noop */ +#define I830_TEXREG_NOP2 8 /* noop */ +#define __I830_TEXREG_MCS 9 /* GFX_OP_MAP_COORD_SETS -- shared */ +#define __I830_TEX_SETUP_SIZE 10 + +#define I830_FRONT 0x1 +#define I830_BACK 0x2 +#define I830_DEPTH 0x4 + +#endif /* _I830_DEFINES_ */ + +typedef struct _drm_i830_init { + enum { + I830_INIT_DMA = 0x01, + I830_CLEANUP_DMA = 0x02 + } func; + unsigned int mmio_offset; + unsigned int buffers_offset; + int sarea_priv_offset; + unsigned int ring_start; + unsigned int ring_end; + unsigned int ring_size; + unsigned int front_offset; + unsigned int back_offset; + unsigned int depth_offset; + unsigned int w; + unsigned int h; + unsigned int pitch; + unsigned int pitch_bits; + unsigned int back_pitch; + unsigned int depth_pitch; + unsigned int cpp; +} drm_i830_init_t; + +/* Warning: If you change the SAREA structure you must change the Xserver + * structure as well */ + +typedef struct _drm_i830_tex_region { + unsigned char next, prev; /* indices to form a circular LRU */ + unsigned char in_use; /* owned by a client, or free? 
 */
+ int age; /* tracked by clients to update local LRU's */
+} drm_i830_tex_region_t;
+
+typedef struct _drm_i830_sarea {
+ unsigned int ContextState[I830_CTX_SETUP_SIZE];
+ unsigned int BufferState[I830_DEST_SETUP_SIZE];
+ unsigned int TexState[I830_TEXTURE_COUNT][I830_TEX_SETUP_SIZE];
+ unsigned int TexBlendState[I830_TEXBLEND_COUNT][I830_TEXBLEND_SIZE];
+ unsigned int TexBlendStateWordsUsed[I830_TEXBLEND_COUNT];
+ unsigned int Palette[2][256];
+ unsigned int dirty;
+
+ unsigned int nbox;
+ drm_clip_rect_t boxes[I830_NR_SAREA_CLIPRECTS];
+
+ /* Maintain an LRU of contiguous regions of texture space. If
+ * you think you own a region of texture memory, and it has an
+ * age different to the one you set, then you are mistaken and
+ * it has been stolen by another client. If global texAge
+ * hasn't changed, there is no need to walk the list.
+ *
+ * These regions can be used as a proxy for the fine-grained
+ * texture information of other clients - by maintaining them
+ * in the same LRU which is used to age their own textures,
+ * clients have an approximate LRU for the whole of global
+ * texture space, and can make informed decisions as to which
+ * areas to kick out. There is no need to choose whether to
+ * kick out your own texture or someone else's - simply eject
+ * them all in LRU order.
+ */
+
+ drm_i830_tex_region_t texList[I830_NR_TEX_REGIONS + 1];
+ /* Last element is a sentinel */
+ int texAge; /* last time texture was uploaded */
+ int last_enqueue; /* last time a buffer was enqueued */
+ int last_dispatch; /* age of the most recently dispatched buffer */
+ int last_quiescent; /* */
+ int ctxOwner; /* last context to upload state */
+
+ int vertex_prim;
+
+ int pf_enabled; /* is pageflipping allowed? */
+ int pf_active;
+ int pf_current_page; /* which buffer is being displayed? */
+
+ int perf_boxes; /* performance boxes to be displayed */
+
+ /* Here's the state for texunits 2,3:
+ */
+ unsigned int TexState2[I830_TEX_SETUP_SIZE];
+ unsigned int TexBlendState2[I830_TEXBLEND_SIZE];
+ unsigned int TexBlendStateWordsUsed2;
+
+ unsigned int TexState3[I830_TEX_SETUP_SIZE];
+ unsigned int TexBlendState3[I830_TEXBLEND_SIZE];
+ unsigned int TexBlendStateWordsUsed3;
+
+ unsigned int StippleState[I830_STP_SETUP_SIZE];
+} drm_i830_sarea_t;
+
+/* Flags for perf_boxes
+ */
+#define I830_BOX_RING_EMPTY 0x1 /* populated by kernel */
+#define I830_BOX_FLIP 0x2 /* populated by kernel */
+#define I830_BOX_WAIT 0x4 /* populated by kernel & client */
+#define I830_BOX_TEXTURE_LOAD 0x8 /* populated by kernel */
+#define I830_BOX_LOST_CONTEXT 0x10 /* populated by client */
+
+/* I830 specific ioctls
+ * The device specific ioctl range is 0x40 to 0x79.
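+ * (Editor's note, a hedged worked example: with DRM_COMMAND_BASE at
+ * 0x40, the GETPARAM request defined below lands at ioctl number
+ * 0x40 + DRM_I830_GETPARAM (0x0c) = 0x4c; the DRM_IOCTL_I830_* macros
+ * that follow compose their request codes the same way.)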
+ */
+#define DRM_I830_INIT 0x00
+#define DRM_I830_VERTEX 0x01
+#define DRM_I830_CLEAR 0x02
+#define DRM_I830_FLUSH 0x03
+#define DRM_I830_GETAGE 0x04
+#define DRM_I830_GETBUF 0x05
+#define DRM_I830_SWAP 0x06
+#define DRM_I830_COPY 0x07
+#define DRM_I830_DOCOPY 0x08
+#define DRM_I830_FLIP 0x09
+#define DRM_I830_IRQ_EMIT 0x0a
+#define DRM_I830_IRQ_WAIT 0x0b
+#define DRM_I830_GETPARAM 0x0c
+#define DRM_I830_SETPARAM 0x0d
+
+#define DRM_IOCTL_I830_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I830_INIT, drm_i830_init_t)
+#define DRM_IOCTL_I830_VERTEX DRM_IOW( DRM_COMMAND_BASE + DRM_I830_VERTEX, drm_i830_vertex_t)
+#define DRM_IOCTL_I830_CLEAR DRM_IOW( DRM_COMMAND_BASE + DRM_I830_CLEAR, drm_i830_clear_t)
+#define DRM_IOCTL_I830_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I830_FLUSH)
+#define DRM_IOCTL_I830_GETAGE DRM_IO ( DRM_COMMAND_BASE + DRM_I830_GETAGE)
+#define DRM_IOCTL_I830_GETBUF DRM_IOWR(DRM_COMMAND_BASE + DRM_I830_GETBUF, drm_i830_dma_t)
+#define DRM_IOCTL_I830_SWAP DRM_IO ( DRM_COMMAND_BASE + DRM_I830_SWAP)
+#define DRM_IOCTL_I830_COPY DRM_IOW( DRM_COMMAND_BASE + DRM_I830_COPY, drm_i830_copy_t)
+#define DRM_IOCTL_I830_DOCOPY DRM_IO ( DRM_COMMAND_BASE + DRM_I830_DOCOPY)
+#define DRM_IOCTL_I830_FLIP DRM_IO ( DRM_COMMAND_BASE + DRM_I830_FLIP)
+#define DRM_IOCTL_I830_IRQ_EMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_I830_IRQ_EMIT, drm_i830_irq_emit_t)
+#define DRM_IOCTL_I830_IRQ_WAIT DRM_IOW( DRM_COMMAND_BASE + DRM_I830_IRQ_WAIT, drm_i830_irq_wait_t)
+#define DRM_IOCTL_I830_GETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_I830_GETPARAM, drm_i830_getparam_t)
+#define DRM_IOCTL_I830_SETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_I830_SETPARAM, drm_i830_setparam_t)
+
+typedef struct _drm_i830_clear {
+ int clear_color;
+ int clear_depth;
+ int flags;
+ unsigned int clear_colormask;
+ unsigned int clear_depthmask;
+} drm_i830_clear_t;
+
+/* These may be placeholders if we have more cliprects than
+ * I830_NR_SAREA_CLIPRECTS. In that case, the client sets discard to
+ * false, indicating that the buffer will be dispatched again with a
+ * new set of cliprects.
+ */
+typedef struct _drm_i830_vertex {
+ int idx; /* buffer index */
+ int used; /* nr bytes in use */
+ int discard; /* client is finished with the buffer?
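+ * (Editor's note on the handshake, as implemented by
+ * i830_dma_dispatch_vertex() earlier in this patch: when discard is set
+ * the kernel cmpxchg()es the buffer's in_use word from I830_BUF_CLIENT
+ * to I830_BUF_HARDWARE, then queues a CMD_STORE_DWORD_IDX that resets
+ * it to I830_BUF_FREE once the hardware has consumed the batch.)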
+ */
+} drm_i830_vertex_t;
+
+typedef struct _drm_i830_copy_t {
+ int idx; /* buffer index */
+ int used; /* nr bytes in use */
+ void __user *address; /* Address to copy from */
+} drm_i830_copy_t;
+
+typedef struct drm_i830_dma {
+ void __user *virtual;
+ int request_idx;
+ int request_size;
+ int granted;
+} drm_i830_dma_t;
+
+/* 1.3: Userspace can request & wait on irq's:
+ */
+typedef struct drm_i830_irq_emit {
+ int __user *irq_seq;
+} drm_i830_irq_emit_t;
+
+typedef struct drm_i830_irq_wait {
+ int irq_seq;
+} drm_i830_irq_wait_t;
+
+/* 1.3: New ioctl to query kernel params:
+ */
+#define I830_PARAM_IRQ_ACTIVE 1
+
+typedef struct drm_i830_getparam {
+ int param;
+ int __user *value;
+} drm_i830_getparam_t;
+
+/* 1.3: New ioctl to set kernel params:
+ */
+#define I830_SETPARAM_USE_MI_BATCHBUFFER_START 1
+
+typedef struct drm_i830_setparam {
+ int param;
+ int value;
+} drm_i830_setparam_t;
+
+#endif /* _I830_DRM_H_ */
diff --git a/nx-X11/extras/drm/linux-core/i830_drv.c b/nx-X11/extras/drm/linux-core/i830_drv.c
new file mode 100644
index 000000000..b58b842e3
--- /dev/null
+++ b/nx-X11/extras/drm/linux-core/i830_drv.c
@@ -0,0 +1,122 @@
+/* i830_drv.c -- I830 driver -*- linux-c -*-
+ * Created: Mon Dec 13 01:56:22 1999 by jhartmann@precisioninsight.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Rickard E.
(Rik) Faith <faith@valinux.com> + * Jeff Hartmann <jhartmann@valinux.com> + * Gareth Hughes <gareth@valinux.com> + * Abraham vd Merwe <abraham@2d3d.co.za> + * Keith Whitwell <keith@tungstengraphics.com> + */ + +#include <linux/config.h> + +#include "drmP.h" +#include "drm.h" +#include "i830_drm.h" +#include "i830_drv.h" + +#include "drm_pciids.h" + + +static struct pci_device_id pciidlist[] = { + i830_PCI_IDS +}; + +extern drm_ioctl_desc_t i830_ioctls[]; +extern int i830_max_ioctl; + +static int probe(struct pci_dev *pdev, const struct pci_device_id *ent); +static struct drm_driver driver = { + .driver_features = + DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | DRIVER_USE_MTRR | + DRIVER_HAVE_DMA | DRIVER_DMA_QUEUE, +#if USE_IRQS + .driver_features |= DRIVER_HAVE_IRQ | DRIVER_SHARED_IRQ, +#endif + .dev_priv_size = sizeof(drm_i830_buf_priv_t), + .load = i830_driver_load, + .lastclose = i830_driver_lastclose, + .preclose = i830_driver_preclose, + .device_is_agp = i830_driver_device_is_agp, + .reclaim_buffers_locked = i830_driver_reclaim_buffers_locked, + .dma_quiescent = i830_driver_dma_quiescent, + .get_map_ofs = drm_core_get_map_ofs, + .get_reg_ofs = drm_core_get_reg_ofs, +#if USE_IRQS + .irq_preinstall = i830_driver_irq_preinstall, + .irq_postinstall = i830_driver_irq_postinstall, + .irq_uninstall = i830_driver_irq_uninstall, + .irq_handler = i830_driver_irq_handler, +#endif + .ioctls = i830_ioctls, + .fops = { + .owner = THIS_MODULE, + .open = drm_open, + .release = drm_release, + .ioctl = drm_ioctl, + .mmap = drm_mmap, + .poll = drm_poll, + .fasync = drm_fasync, + }, + .pci_driver = { + .name = DRIVER_NAME, + .id_table = pciidlist, + .probe = probe, + .remove = __devexit_p(drm_cleanup_pci), + }, + + .name = DRIVER_NAME, + .desc = DRIVER_DESC, + .date = DRIVER_DATE, + .major = DRIVER_MAJOR, + .minor = DRIVER_MINOR, + .patchlevel = DRIVER_PATCHLEVEL, +}; + +static int probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + return drm_get_dev(pdev, ent, &driver); +} + + +static int __init i830_init(void) +{ + driver.num_ioctls = i830_max_ioctl; + return drm_init(&driver, pciidlist); +} + +static void __exit i830_exit(void) +{ + drm_exit(&driver); +} + +module_init(i830_init); +module_exit(i830_exit); + +MODULE_AUTHOR(DRIVER_AUTHOR); +MODULE_DESCRIPTION(DRIVER_DESC); +MODULE_LICENSE("GPL and additional rights"); diff --git a/nx-X11/extras/drm/linux-core/i830_drv.h b/nx-X11/extras/drm/linux-core/i830_drv.h new file mode 100644 index 000000000..1ab980b4f --- /dev/null +++ b/nx-X11/extras/drm/linux-core/i830_drv.h @@ -0,0 +1,295 @@ +/* i830_drv.h -- Private header for the I830 driver -*- linux-c -*- + * Created: Mon Dec 13 01:50:01 1999 by jhartmann@precisioninsight.com + * + * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. + * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. + * All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * + * Authors: Rickard E. (Rik) Faith <faith@valinux.com> + * Jeff Hartmann <jhartmann@valinux.com> + * + */ + +#ifndef _I830_DRV_H_ +#define _I830_DRV_H_ + +/* General customization: + */ + +#define DRIVER_AUTHOR "VA Linux Systems Inc." + +#define DRIVER_NAME "i830" +#define DRIVER_DESC "Intel 830M" +#define DRIVER_DATE "20021108" + +/* Interface history: + * + * 1.1: Original. + * 1.2: ? + * 1.3: New irq emit/wait ioctls. + * New pageflip ioctl. + * New getparam ioctl. + * State for texunits 3&4 in sarea. + * New (alternative) layout for texture state. + */ +#define DRIVER_MAJOR 1 +#define DRIVER_MINOR 3 +#define DRIVER_PATCHLEVEL 2 + +/* Driver will work either way: IRQ's save cpu time when waiting for + * the card, but are subject to subtle interactions between bios, + * hardware and the driver. + */ +/* XXX: Add vblank support? */ +#define USE_IRQS 0 + +typedef struct drm_i830_buf_priv { + u32 *in_use; + int my_use_idx; + int currently_mapped; + void __user *virtual; + void *kernel_virtual; +} drm_i830_buf_priv_t; + +typedef struct _drm_i830_ring_buffer { + int tail_mask; + unsigned long Start; + unsigned long End; + unsigned long Size; + u8 *virtual_start; + int head; + int tail; + int space; +} drm_i830_ring_buffer_t; + +typedef struct drm_i830_private { + drm_map_t *sarea_map; + drm_map_t *mmio_map; + + drm_i830_sarea_t *sarea_priv; + drm_i830_ring_buffer_t ring; + + void *hw_status_page; + unsigned long counter; + + dma_addr_t dma_status_page; + + drm_buf_t *mmap_buffer; + + u32 front_di1, back_di1, zi1; + + int back_offset; + int depth_offset; + int front_offset; + int w, h; + int pitch; + int back_pitch; + int depth_pitch; + unsigned int cpp; + + int do_boxes; + int dma_used; + + int current_page; + int page_flipping; + + wait_queue_head_t irq_queue; + atomic_t irq_received; + atomic_t irq_emitted; + + int use_mi_batchbuffer_start; + +} drm_i830_private_t; + +/* i830_dma.c */ +extern void i830_reclaim_buffers(drm_device_t *dev, struct file *filp); + +/* i830_irq.c */ +extern int i830_irq_emit(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg); +extern int i830_irq_wait(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg); + +extern irqreturn_t i830_driver_irq_handler(DRM_IRQ_ARGS); +extern void i830_driver_irq_preinstall(drm_device_t * dev); +extern void i830_driver_irq_postinstall(drm_device_t * dev); +extern void i830_driver_irq_uninstall(drm_device_t * dev); +extern int i830_driver_load(struct drm_device *, unsigned long flags); +extern void i830_driver_preclose(drm_device_t * dev, DRMFILE filp); +extern void i830_driver_lastclose(drm_device_t * dev); +extern void i830_driver_reclaim_buffers_locked(drm_device_t * dev, + struct file *filp); +extern int i830_driver_dma_quiescent(drm_device_t * dev); +extern int i830_driver_device_is_agp(drm_device_t * dev); + +#define I830_BASE(reg) ((unsigned long) \ + dev_priv->mmio_map->handle) +#define I830_ADDR(reg) (I830_BASE(reg) + reg) +#define I830_DEREF(reg) *(__volatile__ unsigned int 
*)I830_ADDR(reg) +#define I830_READ(reg) readl((volatile u32 *)I830_ADDR(reg)) +#define I830_WRITE(reg,val) writel(val, (volatile u32 *)I830_ADDR(reg)) +#define I830_DEREF16(reg) *(__volatile__ u16 *)I830_ADDR(reg) +#define I830_READ16(reg) I830_DEREF16(reg) +#define I830_WRITE16(reg,val) do { I830_DEREF16(reg) = val; } while (0) + +#define I830_VERBOSE 0 + +#define RING_LOCALS unsigned int outring, ringmask, outcount; \ + volatile char *virt; + +#define BEGIN_LP_RING(n) do { \ + if (I830_VERBOSE) \ + printk("BEGIN_LP_RING(%d) in %s\n", \ + n, __FUNCTION__); \ + if (dev_priv->ring.space < n*4) \ + i830_wait_ring(dev, n*4, __FUNCTION__); \ + outcount = 0; \ + outring = dev_priv->ring.tail; \ + ringmask = dev_priv->ring.tail_mask; \ + virt = dev_priv->ring.virtual_start; \ +} while (0) + +#define OUT_RING(n) do { \ + if (I830_VERBOSE) printk(" OUT_RING %x\n", (int)(n)); \ + *(volatile unsigned int *)(virt + outring) = n; \ + outcount++; \ + outring += 4; \ + outring &= ringmask; \ +} while (0) + +#define ADVANCE_LP_RING() do { \ + if (I830_VERBOSE) printk("ADVANCE_LP_RING %x\n", outring); \ + dev_priv->ring.tail = outring; \ + dev_priv->ring.space -= outcount * 4; \ + I830_WRITE(LP_RING + RING_TAIL, outring); \ +} while(0) + +extern int i830_wait_ring(drm_device_t * dev, int n, const char *caller); + +#define GFX_OP_USER_INTERRUPT ((0<<29)|(2<<23)) +#define GFX_OP_BREAKPOINT_INTERRUPT ((0<<29)|(1<<23)) +#define CMD_REPORT_HEAD (7<<23) +#define CMD_STORE_DWORD_IDX ((0x21<<23) | 0x1) +#define CMD_OP_BATCH_BUFFER ((0x0<<29)|(0x30<<23)|0x1) + +#define STATE3D_LOAD_STATE_IMMEDIATE_2 ((0x3<<29)|(0x1d<<24)|(0x03<<16)) +#define LOAD_TEXTURE_MAP0 (1<<11) + +#define INST_PARSER_CLIENT 0x00000000 +#define INST_OP_FLUSH 0x02000000 +#define INST_FLUSH_MAP_CACHE 0x00000001 + +#define BB1_START_ADDR_MASK (~0x7) +#define BB1_PROTECTED (1<<0) +#define BB1_UNPROTECTED (0<<0) +#define BB2_END_ADDR_MASK (~0x7) + +#define I830REG_HWSTAM 0x02098 +#define I830REG_INT_IDENTITY_R 0x020a4 +#define I830REG_INT_MASK_R 0x020a8 +#define I830REG_INT_ENABLE_R 0x020a0 + +#define I830_IRQ_RESERVED ((1<<13)|(3<<2)) + +#define LP_RING 0x2030 +#define HP_RING 0x2040 +#define RING_TAIL 0x00 +#define TAIL_ADDR 0x001FFFF8 +#define RING_HEAD 0x04 +#define HEAD_WRAP_COUNT 0xFFE00000 +#define HEAD_WRAP_ONE 0x00200000 +#define HEAD_ADDR 0x001FFFFC +#define RING_START 0x08 +#define START_ADDR 0x0xFFFFF000 +#define RING_LEN 0x0C +#define RING_NR_PAGES 0x001FF000 +#define RING_REPORT_MASK 0x00000006 +#define RING_REPORT_64K 0x00000002 +#define RING_REPORT_128K 0x00000004 +#define RING_NO_REPORT 0x00000000 +#define RING_VALID_MASK 0x00000001 +#define RING_VALID 0x00000001 +#define RING_INVALID 0x00000000 + +#define GFX_OP_SCISSOR ((0x3<<29)|(0x1c<<24)|(0x10<<19)) +#define SC_UPDATE_SCISSOR (0x1<<1) +#define SC_ENABLE_MASK (0x1<<0) +#define SC_ENABLE (0x1<<0) + +#define GFX_OP_SCISSOR_INFO ((0x3<<29)|(0x1d<<24)|(0x81<<16)|(0x1)) +#define SCI_YMIN_MASK (0xffff<<16) +#define SCI_XMIN_MASK (0xffff<<0) +#define SCI_YMAX_MASK (0xffff<<16) +#define SCI_XMAX_MASK (0xffff<<0) + +#define GFX_OP_SCISSOR_ENABLE ((0x3<<29)|(0x1c<<24)|(0x10<<19)) +#define GFX_OP_SCISSOR_RECT ((0x3<<29)|(0x1d<<24)|(0x81<<16)|1) +#define GFX_OP_COLOR_FACTOR ((0x3<<29)|(0x1d<<24)|(0x1<<16)|0x0) +#define GFX_OP_STIPPLE ((0x3<<29)|(0x1d<<24)|(0x83<<16)) +#define GFX_OP_MAP_INFO ((0x3<<29)|(0x1d<<24)|0x4) +#define GFX_OP_DESTBUFFER_VARS ((0x3<<29)|(0x1d<<24)|(0x85<<16)|0x0) +#define GFX_OP_DRAWRECT_INFO ((0x3<<29)|(0x1d<<24)|(0x80<<16)|(0x3)) +#define GFX_OP_PRIMITIVE 
((0x3<<29)|(0x1f<<24)) + +#define CMD_OP_DESTBUFFER_INFO ((0x3<<29)|(0x1d<<24)|(0x8e<<16)|1) + +#define CMD_OP_DISPLAYBUFFER_INFO ((0x0<<29)|(0x14<<23)|2) +#define ASYNC_FLIP (1<<22) + +#define CMD_3D (0x3<<29) +#define STATE3D_CONST_BLEND_COLOR_CMD (CMD_3D|(0x1d<<24)|(0x88<<16)) +#define STATE3D_MAP_COORD_SETBIND_CMD (CMD_3D|(0x1d<<24)|(0x02<<16)) + +#define BR00_BITBLT_CLIENT 0x40000000 +#define BR00_OP_COLOR_BLT 0x10000000 +#define BR00_OP_SRC_COPY_BLT 0x10C00000 +#define BR13_SOLID_PATTERN 0x80000000 + +#define BUF_3D_ID_COLOR_BACK (0x3<<24) +#define BUF_3D_ID_DEPTH (0x7<<24) +#define BUF_3D_USE_FENCE (1<<23) +#define BUF_3D_PITCH(x) (((x)/4)<<2) + +#define CMD_OP_MAP_PALETTE_LOAD ((3<<29)|(0x1d<<24)|(0x82<<16)|255) +#define MAP_PALETTE_NUM(x) ((x<<8) & (1<<8)) +#define MAP_PALETTE_BOTH (1<<11) + +#define XY_COLOR_BLT_CMD ((2<<29)|(0x50<<22)|0x4) +#define XY_COLOR_BLT_WRITE_ALPHA (1<<21) +#define XY_COLOR_BLT_WRITE_RGB (1<<20) + +#define XY_SRC_COPY_BLT_CMD ((2<<29)|(0x53<<22)|6) +#define XY_SRC_COPY_BLT_WRITE_ALPHA (1<<21) +#define XY_SRC_COPY_BLT_WRITE_RGB (1<<20) + +#define MI_BATCH_BUFFER ((0x30<<23)|1) +#define MI_BATCH_BUFFER_START (0x31<<23) +#define MI_BATCH_BUFFER_END (0xA<<23) +#define MI_BATCH_NON_SECURE (1) + +#define MI_WAIT_FOR_EVENT ((0x3<<23)) +#define MI_WAIT_FOR_PLANE_A_FLIP (1<<2) +#define MI_WAIT_FOR_PLANE_A_SCANLINES (1<<1) + +#define MI_LOAD_SCAN_LINES_INCL ((0x12<<23)) + +#endif diff --git a/nx-X11/extras/drm/linux-core/i830_irq.c b/nx-X11/extras/drm/linux-core/i830_irq.c new file mode 100644 index 000000000..a6d264ca5 --- /dev/null +++ b/nx-X11/extras/drm/linux-core/i830_irq.c @@ -0,0 +1,199 @@ +/* i830_dma.c -- DMA support for the I830 -*- linux-c -*- + * + * Copyright 2002 Tungsten Graphics, Inc. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Keith Whitwell <keith@tungstengraphics.com> + * + */ + +#include <linux/interrupt.h> /* For task queue support */ +#include <linux/delay.h> + +#include "drmP.h" +#include "drm.h" +#include "i830_drm.h" +#include "i830_drv.h" + +irqreturn_t i830_driver_irq_handler(DRM_IRQ_ARGS) +{ + drm_device_t *dev = (drm_device_t *) arg; + drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private; + u16 temp; + + temp = I830_READ16(I830REG_INT_IDENTITY_R); + DRM_DEBUG("%x\n", temp); + + if (!(temp & 2)) + return IRQ_NONE; + + I830_WRITE16(I830REG_INT_IDENTITY_R, temp); + + atomic_inc(&dev_priv->irq_received); + wake_up_interruptible(&dev_priv->irq_queue); + + return IRQ_HANDLED; +} + +static int i830_emit_irq(drm_device_t * dev) +{ + drm_i830_private_t *dev_priv = dev->dev_private; + RING_LOCALS; + + DRM_DEBUG("%s\n", __FUNCTION__); + + atomic_inc(&dev_priv->irq_emitted); + + BEGIN_LP_RING(2); + OUT_RING(0); + OUT_RING(GFX_OP_USER_INTERRUPT); + ADVANCE_LP_RING(); + + return atomic_read(&dev_priv->irq_emitted); +} + +static int i830_wait_irq(drm_device_t * dev, int irq_nr) +{ + drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private; + DECLARE_WAITQUEUE(entry, current); + unsigned long end = jiffies + HZ * 3; + int ret = 0; + + DRM_DEBUG("%s\n", __FUNCTION__); + + if (atomic_read(&dev_priv->irq_received) >= irq_nr) + return 0; + + dev_priv->sarea_priv->perf_boxes |= I830_BOX_WAIT; + + add_wait_queue(&dev_priv->irq_queue, &entry); + + for (;;) { + current->state = TASK_INTERRUPTIBLE; + if (atomic_read(&dev_priv->irq_received) >= irq_nr) + break; + if ((signed)(end - jiffies) <= 0) { + DRM_ERROR("timeout iir %x imr %x ier %x hwstam %x\n", + I830_READ16(I830REG_INT_IDENTITY_R), + I830_READ16(I830REG_INT_MASK_R), + I830_READ16(I830REG_INT_ENABLE_R), + I830_READ16(I830REG_HWSTAM)); + + ret = -EBUSY; /* Lockup? Missed irq? */ + break; + } + schedule_timeout(HZ * 3); + if (signal_pending(current)) { + ret = -EINTR; + break; + } + } + + current->state = TASK_RUNNING; + remove_wait_queue(&dev_priv->irq_queue, &entry); + return ret; +} + +/* Needs the lock as it touches the ring. + */ +int i830_irq_emit(struct inode *inode, struct file *filp, unsigned int cmd, + unsigned long arg) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->head->dev; + drm_i830_private_t *dev_priv = dev->dev_private; + drm_i830_irq_emit_t emit; + int result; + + LOCK_TEST_WITH_RETURN(dev, filp); + + if (!dev_priv) { + DRM_ERROR("%s called with no initialization\n", __FUNCTION__); + return -EINVAL; + } + + if (copy_from_user + (&emit, (drm_i830_irq_emit_t __user *) arg, sizeof(emit))) + return -EFAULT; + + result = i830_emit_irq(dev); + + if (copy_to_user(emit.irq_seq, &result, sizeof(int))) { + DRM_ERROR("copy_to_user\n"); + return -EFAULT; + } + + return 0; +} + +/* Doesn't need the hardware lock. 
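+ * (Editor's note: i830_wait_irq() above only compares irq_received
+ * against the requested sequence number, sleeping on irq_queue and
+ * giving up with -EBUSY after about three seconds (HZ * 3) if the
+ * interrupt never arrives, so the ring is never touched here.)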
+ */ +int i830_irq_wait(struct inode *inode, struct file *filp, unsigned int cmd, + unsigned long arg) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->head->dev; + drm_i830_private_t *dev_priv = dev->dev_private; + drm_i830_irq_wait_t irqwait; + + if (!dev_priv) { + DRM_ERROR("%s called with no initialization\n", __FUNCTION__); + return -EINVAL; + } + + if (copy_from_user(&irqwait, (drm_i830_irq_wait_t __user *) arg, + sizeof(irqwait))) + return -EFAULT; + + return i830_wait_irq(dev, irqwait.irq_seq); +} + +/* drm_dma.h hooks +*/ +void i830_driver_irq_preinstall(drm_device_t * dev) +{ + drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private; + + I830_WRITE16(I830REG_HWSTAM, 0xffff); + I830_WRITE16(I830REG_INT_MASK_R, 0x0); + I830_WRITE16(I830REG_INT_ENABLE_R, 0x0); + atomic_set(&dev_priv->irq_received, 0); + atomic_set(&dev_priv->irq_emitted, 0); + init_waitqueue_head(&dev_priv->irq_queue); +} + +void i830_driver_irq_postinstall(drm_device_t * dev) +{ + drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private; + + I830_WRITE16(I830REG_INT_ENABLE_R, 0x2); +} + +void i830_driver_irq_uninstall(drm_device_t * dev) +{ + drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private; + if (!dev_priv) + return; + + I830_WRITE16(I830REG_INT_MASK_R, 0xffff); + I830_WRITE16(I830REG_INT_ENABLE_R, 0x0); +} diff --git a/nx-X11/extras/drm/linux-core/i915_drv.c b/nx-X11/extras/drm/linux-core/i915_drv.c new file mode 100644 index 000000000..8a8eb5982 --- /dev/null +++ b/nx-X11/extras/drm/linux-core/i915_drv.c @@ -0,0 +1,109 @@ +/* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*- + */ +/************************************************************************** + * + * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. + * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR + * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + **************************************************************************/ + +#include "drmP.h" +#include "drm.h" +#include "i915_drm.h" +#include "i915_drv.h" + +#include "drm_pciids.h" + +static struct pci_device_id pciidlist[] = { + i915_PCI_IDS +}; + +extern drm_ioctl_desc_t i915_ioctls[]; +extern int i915_max_ioctl; + +static int probe(struct pci_dev *pdev, const struct pci_device_id *ent); +static struct drm_driver driver = { + /* don't use mtrr's here, the Xserver or user space app should + * deal with them for intel hardware. 
+ */ + .driver_features = + DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | /* DRIVER_USE_MTRR | */ + DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED, + .load = i915_driver_load, + .lastclose = i915_driver_lastclose, + .preclose = i915_driver_preclose, + .device_is_agp = i915_driver_device_is_agp, + .irq_preinstall = i915_driver_irq_preinstall, + .irq_postinstall = i915_driver_irq_postinstall, + .irq_uninstall = i915_driver_irq_uninstall, + .irq_handler = i915_driver_irq_handler, + .reclaim_buffers = drm_core_reclaim_buffers, + .get_map_ofs = drm_core_get_map_ofs, + .get_reg_ofs = drm_core_get_reg_ofs, + .ioctls = i915_ioctls, + .fops = { + .owner = THIS_MODULE, + .open = drm_open, + .release = drm_release, + .ioctl = drm_ioctl, + .mmap = drm_mmap, + .poll = drm_poll, + .fasync = drm_fasync, + }, + .pci_driver = { + .name = DRIVER_NAME, + .id_table = pciidlist, + .probe = probe, + .remove = __devexit_p(drm_cleanup_pci), + }, + + .name = DRIVER_NAME, + .desc = DRIVER_DESC, + .date = DRIVER_DATE, + .major = DRIVER_MAJOR, + .minor = DRIVER_MINOR, + .patchlevel = DRIVER_PATCHLEVEL, +}; + +static int probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + return drm_get_dev(pdev, ent, &driver); +} + +static int __init i915_init(void) +{ + driver.num_ioctls = i915_max_ioctl; + return drm_init(&driver, pciidlist); +} + +static void __exit i915_exit(void) +{ + drm_exit(&driver); +} + +module_init(i915_init); +module_exit(i915_exit); + +MODULE_AUTHOR(DRIVER_AUTHOR); +MODULE_DESCRIPTION(DRIVER_DESC); +MODULE_LICENSE("GPL and additional rights"); diff --git a/nx-X11/extras/drm/linux-core/i915_ioc32.c b/nx-X11/extras/drm/linux-core/i915_ioc32.c new file mode 100644 index 000000000..fe009e1b3 --- /dev/null +++ b/nx-X11/extras/drm/linux-core/i915_ioc32.c @@ -0,0 +1,221 @@ +/** + * \file i915_ioc32.c + * + * 32-bit ioctl compatibility routines for the i915 DRM. + * + * \author Alan Hourihane <alanh@fairlite.demon.co.uk> + * + * + * Copyright (C) Paul Mackerras 2005 + * Copyright (C) Alan Hourihane 2005 + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ +#include <linux/compat.h> +#include <linux/ioctl32.h> + +#include "drmP.h" +#include "drm.h" +#include "i915_drm.h" + +typedef struct _drm_i915_batchbuffer32 { + int start; /* agp offset */ + int used; /* nr bytes in use */ + int DR1; /* hw flags for GFX_OP_DRAWRECT_INFO */ + int DR4; /* window origin for GFX_OP_DRAWRECT_INFO */ + int num_cliprects; /* mulitpass with multiple cliprects? 
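+ * (Editor's note on the compat scheme implemented just below: 32-bit
+ * userspace passes pointers as plain u32 values; each shim widens them
+ * back to real __user pointers and rebuilds the native struct with
+ * compat_alloc_user_space() before forwarding to the regular
+ * drm_ioctl() path.)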
*/
+ u32 cliprects; /* pointer to userspace cliprects */
+} drm_i915_batchbuffer32_t;
+
+static int compat_i915_batchbuffer(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_i915_batchbuffer32_t batchbuffer32;
+ drm_i915_batchbuffer_t __user *batchbuffer;
+
+ if (copy_from_user(&batchbuffer32, (void __user *)arg, sizeof(batchbuffer32)))
+ return -EFAULT;
+
+ batchbuffer = compat_alloc_user_space(sizeof(*batchbuffer));
+ if (!access_ok(VERIFY_WRITE, batchbuffer, sizeof(*batchbuffer))
+ || __put_user(batchbuffer32.start, &batchbuffer->start)
+ || __put_user(batchbuffer32.used, &batchbuffer->used)
+ || __put_user(batchbuffer32.DR1, &batchbuffer->DR1)
+ || __put_user(batchbuffer32.DR4, &batchbuffer->DR4)
+ || __put_user(batchbuffer32.num_cliprects, &batchbuffer->num_cliprects)
+ || __put_user((int __user *)(unsigned long)batchbuffer32.cliprects,
+ &batchbuffer->cliprects))
+ return -EFAULT;
+
+ return drm_ioctl(file->f_dentry->d_inode, file,
+ DRM_IOCTL_I915_BATCHBUFFER, (unsigned long) batchbuffer);
+}
+
+typedef struct _drm_i915_cmdbuffer32 {
+ u32 buf; /* pointer to userspace command buffer */
+ int sz; /* nr bytes in buf */
+ int DR1; /* hw flags for GFX_OP_DRAWRECT_INFO */
+ int DR4; /* window origin for GFX_OP_DRAWRECT_INFO */
+ int num_cliprects; /* multipass with multiple cliprects? */
+ u32 cliprects; /* pointer to userspace cliprects */
+} drm_i915_cmdbuffer32_t;
+
+static int compat_i915_cmdbuffer(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_i915_cmdbuffer32_t cmdbuffer32;
+ drm_i915_cmdbuffer_t __user *cmdbuffer;
+
+ if (copy_from_user(&cmdbuffer32, (void __user *)arg, sizeof(cmdbuffer32)))
+ return -EFAULT;
+
+ cmdbuffer = compat_alloc_user_space(sizeof(*cmdbuffer));
+ if (!access_ok(VERIFY_WRITE, cmdbuffer, sizeof(*cmdbuffer))
+ || __put_user((int __user *)(unsigned long)cmdbuffer32.buf,
+ &cmdbuffer->buf)
+ || __put_user(cmdbuffer32.sz, &cmdbuffer->sz)
+ || __put_user(cmdbuffer32.DR1, &cmdbuffer->DR1)
+ || __put_user(cmdbuffer32.DR4, &cmdbuffer->DR4)
+ || __put_user(cmdbuffer32.num_cliprects, &cmdbuffer->num_cliprects)
+ || __put_user((int __user *)(unsigned long)cmdbuffer32.cliprects,
+ &cmdbuffer->cliprects))
+ return -EFAULT;
+
+ return drm_ioctl(file->f_dentry->d_inode, file,
+ DRM_IOCTL_I915_CMDBUFFER, (unsigned long) cmdbuffer);
+}
+
+typedef struct drm_i915_irq_emit32 {
+ u32 irq_seq;
+} drm_i915_irq_emit32_t;
+
+static int compat_i915_irq_emit(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_i915_irq_emit32_t req32;
+ drm_i915_irq_emit_t __user *request;
+
+ if (copy_from_user(&req32, (void __user *) arg, sizeof(req32)))
+ return -EFAULT;
+
+ request = compat_alloc_user_space(sizeof(*request));
+ if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
+ || __put_user((int __user *)(unsigned long)req32.irq_seq,
+ &request->irq_seq))
+ return -EFAULT;
+
+ return drm_ioctl(file->f_dentry->d_inode, file,
+ DRM_IOCTL_I915_IRQ_EMIT, (unsigned long) request);
+}
+typedef struct drm_i915_getparam32 {
+ int param;
+ u32 value;
+} drm_i915_getparam32_t;
+
+static int compat_i915_getparam(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_i915_getparam32_t req32;
+ drm_i915_getparam_t __user *request;
+
+ if (copy_from_user(&req32, (void __user *) arg, sizeof(req32)))
+ return -EFAULT;
+
+ request = compat_alloc_user_space(sizeof(*request));
+ if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
+ || __put_user(req32.param, &request->param)
+ || __put_user((void __user *)(unsigned long)req32.value,
+
&request->value)) + return -EFAULT; + + return drm_ioctl(file->f_dentry->d_inode, file, + DRM_IOCTL_I915_GETPARAM, (unsigned long) request); +} + +typedef struct drm_i915_mem_alloc32 { + int region; + int alignment; + int size; + u32 region_offset; /* offset from start of fb or agp */ +} drm_i915_mem_alloc32_t; + +static int compat_i915_alloc(struct file *file, unsigned int cmd, + unsigned long arg) +{ + drm_i915_mem_alloc32_t req32; + drm_i915_mem_alloc_t __user *request; + + if (copy_from_user(&req32, (void __user *) arg, sizeof(req32))) + return -EFAULT; + + request = compat_alloc_user_space(sizeof(*request)); + if (!access_ok(VERIFY_WRITE, request, sizeof(*request)) + || __put_user(req32.region, &request->region) + || __put_user(req32.alignment, &request->alignment) + || __put_user(req32.size, &request->size) + || __put_user((void __user *)(unsigned long)req32.region_offset, + &request->region_offset)) + return -EFAULT; + + return drm_ioctl(file->f_dentry->d_inode, file, + DRM_IOCTL_I915_ALLOC, (unsigned long) request); +} + + +drm_ioctl_compat_t *i915_compat_ioctls[] = { + [DRM_I915_BATCHBUFFER] = compat_i915_batchbuffer, + [DRM_I915_CMDBUFFER] = compat_i915_cmdbuffer, + [DRM_I915_GETPARAM] = compat_i915_getparam, + [DRM_I915_IRQ_EMIT] = compat_i915_irq_emit, + [DRM_I915_ALLOC] = compat_i915_alloc +}; + +/** + * Called whenever a 32-bit process running under a 64-bit kernel + * performs an ioctl on /dev/dri/card<n>. + * + * \param filp file pointer. + * \param cmd command. + * \param arg user argument. + * \return zero on success or negative number on failure. + */ +long i915_compat_ioctl(struct file *filp, unsigned int cmd, + unsigned long arg) +{ + unsigned int nr = DRM_IOCTL_NR(cmd); + drm_ioctl_compat_t *fn = NULL; + int ret; + + if (nr < DRM_COMMAND_BASE) + return drm_compat_ioctl(filp, cmd, arg); + + if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(i915_compat_ioctls)) + fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE]; + + lock_kernel(); /* XXX for now */ + if (fn != NULL) + ret = (*fn)(filp, cmd, arg); + else + ret = drm_ioctl(filp->f_dentry->d_inode, filp, cmd, arg); + unlock_kernel(); + + return ret; +} diff --git a/nx-X11/extras/drm/linux-core/imagine_drv.c b/nx-X11/extras/drm/linux-core/imagine_drv.c new file mode 100644 index 000000000..bec2fae45 --- /dev/null +++ b/nx-X11/extras/drm/linux-core/imagine_drv.c @@ -0,0 +1,86 @@ +/* + * Copyright 2005 Adam Jackson. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * on the rights to use, copy, modify, merge, publish, distribute, sub + * license, and/or sell copies of the Software, and to permit persons to whom + * the Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * ADAM JACKSON BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ */ + +/* derived from tdfx_drv.c */ + +#include <linux/config.h> +#include "drmP.h" +#include "imagine_drv.h" + +#include "drm_pciids.h" + +static struct drm_driver driver; + +static struct pci_device_id pciidlist[] = { + imagine_PCI_IDS +}; + +static int probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + return drm_get_dev(pdev, ent, &driver); +} + +static struct drm_driver driver = { + .driver_features = DRIVER_USE_MTRR, + .reclaim_buffers = drm_core_reclaim_buffers, + .get_map_ofs = drm_core_get_map_ofs, + .get_reg_ofs = drm_core_get_reg_ofs, + .fops = { + .owner = THIS_MODULE, + .open = drm_open, + .release = drm_release, + .ioctl = drm_ioctl, + .mmap = drm_mmap, + .poll = drm_poll, + .fasync = drm_fasync, + }, + .pci_driver = { + .name = DRIVER_NAME, + .id_table = pciidlist, + .probe = probe, + .remove = __devexit_p(drm_cleanup_pci), + }, + + .name = DRIVER_NAME, + .desc = DRIVER_DESC, + .date = DRIVER_DATE, + .major = DRIVER_MAJOR, + .minor = DRIVER_MINOR, + .patchlevel = DRIVER_PATCHLEVEL, +}; + +static int __init imagine_init(void) +{ + return drm_init(&driver, pciidlist); +} + +static void __exit imagine_exit(void) +{ + drm_exit(&driver); +} + +module_init(imagine_init); +module_exit(imagine_exit); + +MODULE_AUTHOR(DRIVER_AUTHOR); +MODULE_DESCRIPTION(DRIVER_DESC); +MODULE_LICENSE("GPL and additional rights"); diff --git a/nx-X11/extras/drm/linux-core/mach64_drv.c b/nx-X11/extras/drm/linux-core/mach64_drv.c new file mode 100644 index 000000000..bcf60b59b --- /dev/null +++ b/nx-X11/extras/drm/linux-core/mach64_drv.c @@ -0,0 +1,107 @@ +/* mach64_drv.c -- mach64 (Rage Pro) driver -*- linux-c -*- + * Created: Fri Nov 24 18:34:32 2000 by gareth@valinux.com + * + * Copyright 2000 Gareth Hughes + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * GARETH HUGHES BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: + * Gareth Hughes <gareth@valinux.com> + * Leif Delgass <ldelgass@retinalburn.net> + */ + +#include <linux/config.h> +#include "drmP.h" +#include "drm.h" +#include "mach64_drm.h" +#include "mach64_drv.h" + +#include "drm_pciids.h" + +static struct pci_device_id pciidlist[] = { + mach64_PCI_IDS +}; + +extern drm_ioctl_desc_t mach64_ioctls[]; +extern int mach64_max_ioctl; + +static int probe(struct pci_dev *pdev, const struct pci_device_id *ent); +static struct drm_driver driver = { + .driver_features = + DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_HAVE_DMA + | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_IRQ_VBL, + .lastclose = mach64_driver_lastclose, + .vblank_wait = mach64_driver_vblank_wait, + .irq_preinstall = mach64_driver_irq_preinstall, + .irq_postinstall = mach64_driver_irq_postinstall, + .irq_uninstall = mach64_driver_irq_uninstall, + .irq_handler = mach64_driver_irq_handler, + .reclaim_buffers = drm_core_reclaim_buffers, + .get_map_ofs = drm_core_get_map_ofs, + .get_reg_ofs = drm_core_get_reg_ofs, + .ioctls = mach64_ioctls, + .dma_ioctl = mach64_dma_buffers, + .fops = { + .owner = THIS_MODULE, + .open = drm_open, + .release = drm_release, + .ioctl = drm_ioctl, + .mmap = drm_mmap, + .poll = drm_poll, + .fasync = drm_fasync, + }, + .pci_driver = { + .name = DRIVER_NAME, + .id_table = pciidlist, + .probe = probe, + .remove = __devexit_p(drm_cleanup_pci), + }, + + .name = DRIVER_NAME, + .desc = DRIVER_DESC, + .date = DRIVER_DATE, + .major = DRIVER_MAJOR, + .minor = DRIVER_MINOR, + .patchlevel = DRIVER_PATCHLEVEL, +}; + +static int probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + return drm_get_dev(pdev, ent, &driver); +} + + +static int __init mach64_init(void) +{ + driver.num_ioctls = mach64_max_ioctl; + return drm_init(&driver, pciidlist); +} + +static void __exit mach64_exit(void) +{ + drm_exit(&driver); +} + +module_init(mach64_init); +module_exit(mach64_exit); + +MODULE_AUTHOR(DRIVER_AUTHOR); +MODULE_DESCRIPTION(DRIVER_DESC); +MODULE_LICENSE("GPL and additional rights"); diff --git a/nx-X11/extras/drm/linux-core/mga_drv.c b/nx-X11/extras/drm/linux-core/mga_drv.c new file mode 100644 index 000000000..1a5599275 --- /dev/null +++ b/nx-X11/extras/drm/linux-core/mga_drv.c @@ -0,0 +1,154 @@ +/* mga_drv.c -- Matrox G200/G400 driver -*- linux-c -*- + * Created: Mon Dec 13 01:56:22 1999 by jhartmann@precisioninsight.com + * + * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. + * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: + * Rickard E. (Rik) Faith <faith@valinux.com> + * Gareth Hughes <gareth@valinux.com> + */ + +#include <linux/config.h> +#include "drmP.h" +#include "drm.h" +#include "mga_drm.h" +#include "mga_drv.h" + +#include "drm_pciids.h" + +static int mga_driver_device_is_agp(drm_device_t * dev); + +static struct pci_device_id pciidlist[] = { + mga_PCI_IDS +}; + +extern drm_ioctl_desc_t mga_ioctls[]; +extern int mga_max_ioctl; + +static int probe(struct pci_dev *pdev, const struct pci_device_id *ent); +static struct drm_driver driver = { + .driver_features = + DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | + DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | + DRIVER_IRQ_VBL, + .load = mga_driver_load, + .unload = mga_driver_unload, + .lastclose = mga_driver_lastclose, + .dma_quiescent = mga_driver_dma_quiescent, + .device_is_agp = mga_driver_device_is_agp, + .vblank_wait = mga_driver_vblank_wait, + .irq_preinstall = mga_driver_irq_preinstall, + .irq_postinstall = mga_driver_irq_postinstall, + .irq_uninstall = mga_driver_irq_uninstall, + .irq_handler = mga_driver_irq_handler, + .reclaim_buffers = drm_core_reclaim_buffers, + .get_map_ofs = drm_core_get_map_ofs, + .get_reg_ofs = drm_core_get_reg_ofs, + .ioctls = mga_ioctls, + .dma_ioctl = mga_dma_buffers, + .fops = { + .owner = THIS_MODULE, + .open = drm_open, + .release = drm_release, + .ioctl = drm_ioctl, + .mmap = drm_mmap, + .poll = drm_poll, + .fasync = drm_fasync, +#if defined(CONFIG_COMPAT) && LINUX_VERSION_CODE > KERNEL_VERSION(2,6,9) + .compat_ioctl = mga_compat_ioctl, +#endif + }, + .pci_driver = { + .name = DRIVER_NAME, + .id_table = pciidlist, + .probe = probe, + .remove = __devexit_p(drm_cleanup_pci), + }, + + .name = DRIVER_NAME, + .desc = DRIVER_DESC, + .date = DRIVER_DATE, + .major = DRIVER_MAJOR, + .minor = DRIVER_MINOR, + .patchlevel = DRIVER_PATCHLEVEL, +}; + +static int probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + return drm_get_dev(pdev, ent, &driver); +} + + +static int __init mga_init(void) +{ + driver.num_ioctls = mga_max_ioctl; + return drm_init(&driver, pciidlist); +} + +static void __exit mga_exit(void) +{ + drm_exit(&driver); +} + +module_init(mga_init); +module_exit(mga_exit); + +MODULE_AUTHOR(DRIVER_AUTHOR); +MODULE_DESCRIPTION(DRIVER_DESC); +MODULE_LICENSE("GPL and additional rights"); + +/** + * Determine if the device really is AGP or not. + * + * In addition to the usual tests performed by \c drm_device_is_agp, this + * function detects PCI G450 cards that appear to the system exactly like + * AGP G450 cards. + * + * \param dev The device to be tested. + * + * \returns + * If the device is a PCI G450, zero is returned. Otherwise 2 is returned. + */ +int mga_driver_device_is_agp(drm_device_t * dev) +{ + const struct pci_dev * const pdev = dev->pdev; + + + /* There are PCI versions of the G450. These cards have the + * same PCI ID as the AGP G450, but have an additional PCI-to-PCI + * bridge chip. We detect these cards, which are not currently + * supported by this driver, by looking at the device ID of the + * bus the "card" is on. If vendor is 0x3388 (Hint Corp) and the + * device is 0x0021 (HB6 Universal PCI-PCI bridge), we reject the + * device. 
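+ *
+ * (Editor's note, hedged: on such a machine "lspci -n" would show the
+ * bridge as vendor:device 3388:0021 one hop above the G450; the check
+ * below reads the same vendor/device IDs from pdev->bus->self.)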
+ */ + + if ( (pdev->device == 0x0525) + && (pdev->bus->self->vendor == 0x3388) + && (pdev->bus->self->device == 0x0021) ) { + return 0; + } + + return 2; +} diff --git a/nx-X11/extras/drm/linux-core/mga_ioc32.c b/nx-X11/extras/drm/linux-core/mga_ioc32.c new file mode 100644 index 000000000..5775cd638 --- /dev/null +++ b/nx-X11/extras/drm/linux-core/mga_ioc32.c @@ -0,0 +1,235 @@ + +/** + * \file mga_ioc32.c + * + * 32-bit ioctl compatibility routines for the MGA DRM. + * + * \author Dave Airlie <airlied@linux.ie> with code from patches by Egbert Eich + * + * + * Copyright (C) Paul Mackerras 2005 + * Copyright (C) Egbert Eich 2003,2004 + * Copyright (C) Dave Airlie 2005 + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
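+ *
+ * (Editor's note, hedged: most thunks in this file only marshal arguments
+ * inward, but compat_mga_dma_bootstrap below is bidirectional: after
+ * drm_ioctl returns it reads the kernel-filled fields back with __get_user
+ * and copies the refreshed 32-bit struct out to the caller with
+ * copy_to_user.)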
+ */ +#include <linux/compat.h> +#include <linux/ioctl32.h> + +#include "drmP.h" +#include "drm.h" +#include "mga_drm.h" + +typedef struct drm32_mga_init { + int func; + u32 sarea_priv_offset; + int chipset; + int sgram; + unsigned int maccess; + unsigned int fb_cpp; + unsigned int front_offset, front_pitch; + unsigned int back_offset, back_pitch; + unsigned int depth_cpp; + unsigned int depth_offset, depth_pitch; + unsigned int texture_offset[MGA_NR_TEX_HEAPS]; + unsigned int texture_size[MGA_NR_TEX_HEAPS]; + u32 fb_offset; + u32 mmio_offset; + u32 status_offset; + u32 warp_offset; + u32 primary_offset; + u32 buffers_offset; +} drm_mga_init32_t; + +static int compat_mga_init(struct file *file, unsigned int cmd, + unsigned long arg) +{ + drm_mga_init32_t init32; + drm_mga_init_t __user *init; + int err = 0, i; + + if (copy_from_user(&init32, (void __user *)arg, sizeof(init32))) + return -EFAULT; + + init = compat_alloc_user_space(sizeof(*init)); + if (!access_ok(VERIFY_WRITE, init, sizeof(*init)) + || __put_user(init32.func, &init->func) + || __put_user(init32.sarea_priv_offset, &init->sarea_priv_offset) + || __put_user(init32.chipset, &init->chipset) + || __put_user(init32.sgram, &init->sgram) + || __put_user(init32.maccess, &init->maccess) + || __put_user(init32.fb_cpp, &init->fb_cpp) + || __put_user(init32.front_offset, &init->front_offset) + || __put_user(init32.front_pitch, &init->front_pitch) + || __put_user(init32.back_offset, &init->back_offset) + || __put_user(init32.back_pitch, &init->back_pitch) + || __put_user(init32.depth_cpp, &init->depth_cpp) + || __put_user(init32.depth_offset, &init->depth_offset) + || __put_user(init32.depth_pitch, &init->depth_pitch) + || __put_user(init32.fb_offset, &init->fb_offset) + || __put_user(init32.mmio_offset, &init->mmio_offset) + || __put_user(init32.status_offset, &init->status_offset) + || __put_user(init32.warp_offset, &init->warp_offset) + || __put_user(init32.primary_offset, &init->primary_offset) + || __put_user(init32.buffers_offset, &init->buffers_offset)) + return -EFAULT; + + for (i=0; i<MGA_NR_TEX_HEAPS; i++) + { + err |= __put_user(init32.texture_offset[i], &init->texture_offset[i]); + err |= __put_user(init32.texture_size[i], &init->texture_size[i]); + } + if (err) + return -EFAULT; + + return drm_ioctl(file->f_dentry->d_inode, file, + DRM_IOCTL_MGA_INIT, (unsigned long) init); +} + + +typedef struct drm_mga_getparam32 { + int param; + u32 value; +} drm_mga_getparam32_t; + + +static int compat_mga_getparam(struct file *file, unsigned int cmd, + unsigned long arg) +{ + drm_mga_getparam32_t getparam32; + drm_mga_getparam_t __user *getparam; + + if (copy_from_user(&getparam32, (void __user *)arg, sizeof(getparam32))) + return -EFAULT; + + getparam = compat_alloc_user_space(sizeof(*getparam)); + if (!access_ok(VERIFY_WRITE, getparam, sizeof(*getparam)) + || __put_user(getparam32.param, &getparam->param) + || __put_user((void __user *)(unsigned long)getparam32.value, &getparam->value)) + return -EFAULT; + + return drm_ioctl(file->f_dentry->d_inode, file, + DRM_IOCTL_MGA_GETPARAM, (unsigned long)getparam); +} + +typedef struct drm_mga_drm_bootstrap32 { + u32 texture_handle; + u32 texture_size; + u32 primary_size; + u32 secondary_bin_count; + u32 secondary_bin_size; + u32 agp_mode; + u8 agp_size; +} drm_mga_dma_bootstrap32_t; + +static int compat_mga_dma_bootstrap(struct file *file, unsigned int cmd, + unsigned long arg) +{ + drm_mga_dma_bootstrap32_t dma_bootstrap32; + drm_mga_dma_bootstrap_t __user *dma_bootstrap; + int err; + + if 
(copy_from_user(&dma_bootstrap32, (void __user *)arg, + sizeof(dma_bootstrap32))) + return -EFAULT; + + dma_bootstrap = compat_alloc_user_space(sizeof(*dma_bootstrap)); + if (!access_ok(VERIFY_WRITE, dma_bootstrap, sizeof(*dma_bootstrap)) + || __put_user(dma_bootstrap32.texture_handle, + &dma_bootstrap->texture_handle) + || __put_user(dma_bootstrap32.texture_size, + &dma_bootstrap->texture_size) + || __put_user(dma_bootstrap32.primary_size, + &dma_bootstrap->primary_size) + || __put_user(dma_bootstrap32.secondary_bin_count, + &dma_bootstrap->secondary_bin_count) + || __put_user(dma_bootstrap32.secondary_bin_size, + &dma_bootstrap->secondary_bin_size) + || __put_user(dma_bootstrap32.agp_mode, &dma_bootstrap->agp_mode) + || __put_user(dma_bootstrap32.agp_size, &dma_bootstrap->agp_size)) + return -EFAULT; + + err = drm_ioctl(file->f_dentry->d_inode, file, + DRM_IOCTL_MGA_DMA_BOOTSTRAP, + (unsigned long)dma_bootstrap); + if (err) + return err; + + if (__get_user(dma_bootstrap32.texture_handle, + &dma_bootstrap->texture_handle) + || __get_user(dma_bootstrap32.texture_size, + &dma_bootstrap->texture_size) + || __get_user(dma_bootstrap32.primary_size, + &dma_bootstrap->primary_size) + || __get_user(dma_bootstrap32.secondary_bin_count, + &dma_bootstrap->secondary_bin_count) + || __get_user(dma_bootstrap32.secondary_bin_size, + &dma_bootstrap->secondary_bin_size) + || __get_user(dma_bootstrap32.agp_mode, + &dma_bootstrap->agp_mode) + || __get_user(dma_bootstrap32.agp_size, + &dma_bootstrap->agp_size)) + return -EFAULT; + + if (copy_to_user((void __user *)arg, &dma_bootstrap32, + sizeof(dma_bootstrap32))) + return -EFAULT; + + return 0; +} + +drm_ioctl_compat_t *mga_compat_ioctls[] = { + [DRM_MGA_INIT] = compat_mga_init, + [DRM_MGA_GETPARAM] = compat_mga_getparam, + [DRM_MGA_DMA_BOOTSTRAP] = compat_mga_dma_bootstrap, +}; + +/** + * Called whenever a 32-bit process running under a 64-bit kernel + * performs an ioctl on /dev/dri/card<n>. + * + * \param filp file pointer. + * \param cmd command. + * \param arg user argument. + * \return zero on success or negative number on failure. + */ +long mga_compat_ioctl(struct file *filp, unsigned int cmd, + unsigned long arg) +{ + unsigned int nr = DRM_IOCTL_NR(cmd); + drm_ioctl_compat_t *fn = NULL; + int ret; + + if (nr < DRM_COMMAND_BASE) + return drm_compat_ioctl(filp, cmd, arg); + + if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(mga_compat_ioctls)) + fn = mga_compat_ioctls[nr - DRM_COMMAND_BASE]; + + lock_kernel(); /* XXX for now */ + if (fn != NULL) + ret = (*fn)(filp, cmd, arg); + else + ret = drm_ioctl(filp->f_dentry->d_inode, filp, cmd, arg); + unlock_kernel(); + + return ret; +} diff --git a/nx-X11/extras/drm/linux-core/nv_drv.c b/nx-X11/extras/drm/linux-core/nv_drv.c new file mode 100644 index 000000000..a6afb0244 --- /dev/null +++ b/nx-X11/extras/drm/linux-core/nv_drv.c @@ -0,0 +1,95 @@ +/* nv_drv.c -- nv driver -*- linux-c -*- + * Created: Thu Oct 7 10:38:32 1999 by faith@precisioninsight.com + * + * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. + * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. + * Copyright 2005 Lars Knoll <lars@trolltech.com> + * All Rights Reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * + * Authors: + * Rickard E. (Rik) Faith <faith@valinux.com> + * Daryll Strauss <daryll@valinux.com> + * Gareth Hughes <gareth@valinux.com> + * Lars Knoll <lars@trolltech.com> + */ + +#include <linux/config.h> +#include "drmP.h" +#include "nv_drv.h" + +#include "drm_pciids.h" + +static struct pci_device_id pciidlist[] = { + nv_PCI_IDS +}; + +static int probe(struct pci_dev *pdev, const struct pci_device_id *ent); +static struct drm_driver driver = { + .driver_features = DRIVER_USE_MTRR | DRIVER_USE_AGP, + .reclaim_buffers = drm_core_reclaim_buffers, + .get_map_ofs = drm_core_get_map_ofs, + .get_reg_ofs = drm_core_get_reg_ofs, + .fops = { + .owner = THIS_MODULE, + .open = drm_open, + .release = drm_release, + .ioctl = drm_ioctl, + .mmap = drm_mmap, + .poll = drm_poll, + .fasync = drm_fasync, + }, + .pci_driver = { + .name = DRIVER_NAME, + .id_table = pciidlist, + .probe = probe, + .remove = __devexit_p(drm_cleanup_pci), + }, + .name = DRIVER_NAME, + .desc = DRIVER_DESC, + .date = DRIVER_DATE, + .major = DRIVER_MAJOR, + .minor = DRIVER_MINOR, + .patchlevel = DRIVER_PATCHLEVEL, +}; + +static int probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + return drm_get_dev(pdev, ent, &driver); +} + + +static int __init nv_init(void) +{ + return drm_init(&driver, pciidlist); +} + +static void __exit nv_exit(void) +{ + drm_exit(&driver); +} + +module_init(nv_init); +module_exit(nv_exit); + +MODULE_AUTHOR(DRIVER_AUTHOR); +MODULE_DESCRIPTION(DRIVER_DESC); +MODULE_LICENSE("GPL and additional rights"); diff --git a/nx-X11/extras/drm/linux-core/r128_drv.c b/nx-X11/extras/drm/linux-core/r128_drv.c new file mode 100644 index 000000000..a72232806 --- /dev/null +++ b/nx-X11/extras/drm/linux-core/r128_drv.c @@ -0,0 +1,116 @@ +/* r128_drv.c -- ATI Rage 128 driver -*- linux-c -*- + * Created: Mon Dec 13 09:47:27 1999 by faith@precisioninsight.com + * + * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. + * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. + * All Rights Reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: + * Rickard E. (Rik) Faith <faith@valinux.com> + * Gareth Hughes <gareth@valinux.com> + */ + +#include <linux/config.h> +#include "drmP.h" +#include "drm.h" +#include "r128_drm.h" +#include "r128_drv.h" + +#include "drm_pciids.h" + +static struct pci_device_id pciidlist[] = { + r128_PCI_IDS +}; + +extern drm_ioctl_desc_t r128_ioctls[]; +extern int r128_max_ioctl; + +static int probe(struct pci_dev *pdev, const struct pci_device_id *ent); +static struct drm_driver driver = { + .driver_features = + DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG | + DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | + DRIVER_IRQ_VBL, + .dev_priv_size = sizeof(drm_r128_buf_priv_t), + .preclose = r128_driver_preclose, + .lastclose = r128_driver_lastclose, + .vblank_wait = r128_driver_vblank_wait, + .irq_preinstall = r128_driver_irq_preinstall, + .irq_postinstall = r128_driver_irq_postinstall, + .irq_uninstall = r128_driver_irq_uninstall, + .irq_handler = r128_driver_irq_handler, + .reclaim_buffers = drm_core_reclaim_buffers, + .get_map_ofs = drm_core_get_map_ofs, + .get_reg_ofs = drm_core_get_reg_ofs, + .ioctls = r128_ioctls, + .dma_ioctl = r128_cce_buffers, + .fops = { + .owner = THIS_MODULE, + .open = drm_open, + .release = drm_release, + .ioctl = drm_ioctl, + .mmap = drm_mmap, + .poll = drm_poll, + .fasync = drm_fasync, +#if defined(CONFIG_COMPAT) && LINUX_VERSION_CODE > KERNEL_VERSION(2,6,9) + .compat_ioctl = r128_compat_ioctl, +#endif + }, + .pci_driver = { + .name = DRIVER_NAME, + .id_table = pciidlist, + .probe = probe, + .remove = __devexit_p(drm_cleanup_pci), + }, + + .name = DRIVER_NAME, + .desc = DRIVER_DESC, + .date = DRIVER_DATE, + .major = DRIVER_MAJOR, + .minor = DRIVER_MINOR, + .patchlevel = DRIVER_PATCHLEVEL, +}; + +static int probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + return drm_get_dev(pdev, ent, &driver); +} + + +static int __init r128_init(void) +{ + driver.num_ioctls = r128_max_ioctl; + + return drm_init(&driver, pciidlist); +} + +static void __exit r128_exit(void) +{ + drm_exit(&driver); +} + +module_init(r128_init); +module_exit(r128_exit); + +MODULE_AUTHOR(DRIVER_AUTHOR); +MODULE_DESCRIPTION(DRIVER_DESC); +MODULE_LICENSE("GPL and additional rights"); diff --git a/nx-X11/extras/drm/linux-core/r128_ioc32.c b/nx-X11/extras/drm/linux-core/r128_ioc32.c new file mode 100644 index 000000000..60598ef94 --- /dev/null +++ 
b/nx-X11/extras/drm/linux-core/r128_ioc32.c @@ -0,0 +1,219 @@ +/** + * \file r128_ioc32.c + * + * 32-bit ioctl compatibility routines for the R128 DRM. + * + * \author Dave Airlie <airlied@linux.ie> with code from patches by Egbert Eich + * + * Copyright (C) Paul Mackerras 2005 + * Copyright (C) Egbert Eich 2003,2004 + * Copyright (C) Dave Airlie 2005 + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ +#include <linux/compat.h> +#include <linux/ioctl32.h> + +#include "drmP.h" +#include "drm.h" +#include "r128_drm.h" + +typedef struct drm_r128_init32 { + int func; + unsigned int sarea_priv_offset; + int is_pci; + int cce_mode; + int cce_secure; + int ring_size; + int usec_timeout; + + unsigned int fb_bpp; + unsigned int front_offset, front_pitch; + unsigned int back_offset, back_pitch; + unsigned int depth_bpp; + unsigned int depth_offset, depth_pitch; + unsigned int span_offset; + + unsigned int fb_offset; + unsigned int mmio_offset; + unsigned int ring_offset; + unsigned int ring_rptr_offset; + unsigned int buffers_offset; + unsigned int agp_textures_offset; +} drm_r128_init32_t; + +static int compat_r128_init(struct file *file, unsigned int cmd, + unsigned long arg) +{ + drm_r128_init32_t init32; + drm_r128_init_t __user *init; + + if (copy_from_user(&init32, (void __user *)arg, sizeof(init32))) + return -EFAULT; + + init = compat_alloc_user_space(sizeof(*init)); + if (!access_ok(VERIFY_WRITE, init, sizeof(*init)) + || __put_user(init32.func, &init->func) + || __put_user(init32.sarea_priv_offset, &init->sarea_priv_offset) + || __put_user(init32.is_pci, &init->is_pci) + || __put_user(init32.cce_mode, &init->cce_mode) + || __put_user(init32.cce_secure, &init->cce_secure) + || __put_user(init32.ring_size, &init->ring_size) + || __put_user(init32.usec_timeout, &init->usec_timeout) + || __put_user(init32.fb_bpp, &init->fb_bpp) + || __put_user(init32.front_offset, &init->front_offset) + || __put_user(init32.front_pitch, &init->front_pitch) + || __put_user(init32.back_offset, &init->back_offset) + || __put_user(init32.back_pitch, &init->back_pitch) + || __put_user(init32.depth_bpp, &init->depth_bpp) + || __put_user(init32.depth_offset, &init->depth_offset) + || __put_user(init32.depth_pitch, &init->depth_pitch) + || __put_user(init32.span_offset, &init->span_offset) + || __put_user(init32.fb_offset, &init->fb_offset) + || __put_user(init32.mmio_offset, &init->mmio_offset) + || __put_user(init32.ring_offset, &init->ring_offset) + 
|| __put_user(init32.ring_rptr_offset, &init->ring_rptr_offset) + || __put_user(init32.buffers_offset, &init->buffers_offset) + || __put_user(init32.agp_textures_offset, &init->agp_textures_offset)) + return -EFAULT; + + return drm_ioctl(file->f_dentry->d_inode, file, + DRM_IOCTL_R128_INIT, (unsigned long)init); +} + + +typedef struct drm_r128_depth32 { + int func; + int n; + u32 x; + u32 y; + u32 buffer; + u32 mask; +} drm_r128_depth32_t; + +static int compat_r128_depth(struct file *file, unsigned int cmd, + unsigned long arg) +{ + drm_r128_depth32_t depth32; + drm_r128_depth_t __user *depth; + + if (copy_from_user(&depth32, (void __user *)arg, sizeof(depth32))) + return -EFAULT; + + depth = compat_alloc_user_space(sizeof(*depth)); + if (!access_ok(VERIFY_WRITE, depth, sizeof(*depth)) + || __put_user(depth32.func, &depth->func) + || __put_user(depth32.n, &depth->n) + || __put_user((int __user *)(unsigned long)depth32.x, &depth->x) + || __put_user((int __user *)(unsigned long)depth32.y, &depth->y) + || __put_user((unsigned int __user *)(unsigned long)depth32.buffer, &depth->buffer) + || __put_user((unsigned char __user *)(unsigned long)depth32.mask, &depth->mask)) + return -EFAULT; + + return drm_ioctl(file->f_dentry->d_inode, file, + DRM_IOCTL_R128_DEPTH, (unsigned long)depth); + +} + +typedef struct drm_r128_stipple32 { + u32 mask; +} drm_r128_stipple32_t; + +static int compat_r128_stipple(struct file *file, unsigned int cmd, + unsigned long arg) +{ + drm_r128_stipple32_t stipple32; + drm_r128_stipple_t __user *stipple; + + if (copy_from_user(&stipple32, (void __user *)arg, sizeof(stipple32))) + return -EFAULT; + + stipple = compat_alloc_user_space(sizeof(*stipple)); + if (!access_ok(VERIFY_WRITE, stipple, sizeof(*stipple)) + || __put_user((unsigned int __user *)(unsigned long)stipple32.mask, &stipple->mask)) + return -EFAULT; + + return drm_ioctl(file->f_dentry->d_inode, file, + DRM_IOCTL_R128_STIPPLE, (unsigned long)stipple); +} + +typedef struct drm_r128_getparam32 { + int param; + u32 value; +} drm_r128_getparam32_t; + +static int compat_r128_getparam(struct file *file, unsigned int cmd, + unsigned long arg) +{ + drm_r128_getparam32_t getparam32; + drm_r128_getparam_t __user *getparam; + + if (copy_from_user(&getparam32, (void __user *)arg, sizeof(getparam32))) + return -EFAULT; + + getparam = compat_alloc_user_space(sizeof(*getparam)); + if (!access_ok(VERIFY_WRITE, getparam, sizeof(*getparam)) + || __put_user(getparam32.param, &getparam->param) + || __put_user((void __user *)(unsigned long)getparam32.value, &getparam->value)) + return -EFAULT; + + return drm_ioctl(file->f_dentry->d_inode, file, + DRM_IOCTL_R128_GETPARAM, (unsigned long)getparam); +} + +drm_ioctl_compat_t *r128_compat_ioctls[] = { + [DRM_R128_INIT] = compat_r128_init, + [DRM_R128_DEPTH] = compat_r128_depth, + [DRM_R128_STIPPLE] = compat_r128_stipple, + [DRM_R128_GETPARAM] = compat_r128_getparam, +}; + +/** + * Called whenever a 32-bit process running under a 64-bit kernel + * performs an ioctl on /dev/dri/card<n>. + * + * \param filp file pointer. + * \param cmd command. + * \param arg user argument. + * \return zero on success or negative number on failure. 
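+ *
+ * (Editor's sketch, hedged: dispatch depends only on the ioctl number,
+ *
+ *	nr = DRM_IOCTL_NR(cmd);
+ *	nr < DRM_COMMAND_BASE          -> drm_compat_ioctl (core DRM commands)
+ *	slot in r128_compat_ioctls[]   -> driver-specific thunk
+ *	empty slot                     -> plain drm_ioctl
+ *
+ * mirroring the body that follows.)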
+ */ +long r128_compat_ioctl(struct file *filp, unsigned int cmd, + unsigned long arg) +{ + unsigned int nr = DRM_IOCTL_NR(cmd); + drm_ioctl_compat_t *fn = NULL; + int ret; + + if (nr < DRM_COMMAND_BASE) + return drm_compat_ioctl(filp, cmd, arg); + + if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(r128_compat_ioctls)) + fn = r128_compat_ioctls[nr - DRM_COMMAND_BASE]; + + lock_kernel(); /* XXX for now */ + if (fn != NULL) + ret = (*fn)(filp, cmd, arg); + else + ret = drm_ioctl(filp->f_dentry->d_inode, filp, cmd, arg); + unlock_kernel(); + + return ret; +} diff --git a/nx-X11/extras/drm/linux-core/radeon_drv.c b/nx-X11/extras/drm/linux-core/radeon_drv.c new file mode 100644 index 000000000..57c24f4a0 --- /dev/null +++ b/nx-X11/extras/drm/linux-core/radeon_drv.c @@ -0,0 +1,136 @@ +/** + * \file radeon_drv.c + * ATI Radeon driver + * + * \author Gareth Hughes <gareth@valinux.com> + */ + +/* + * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#include <linux/config.h> +#include "drmP.h" +#include "drm.h" +#include "radeon_drm.h" +#include "radeon_drv.h" + +#include "drm_pciids.h" + +int radeon_no_wb; + +MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers\n"); +module_param_named(no_wb, radeon_no_wb, int, 0444); + +static int dri_library_name(struct drm_device * dev, char * buf) +{ + drm_radeon_private_t *dev_priv = dev->dev_private; + int family = dev_priv->flags & CHIP_FAMILY_MASK; + + return snprintf(buf, PAGE_SIZE, "%s\n", + (family < CHIP_R200) ? "radeon" : + ((family < CHIP_R300) ? 
"r200" : + "r300")); +} + +static struct pci_device_id pciidlist[] = { + radeon_PCI_IDS +}; + +extern drm_ioctl_desc_t radeon_ioctls[]; +extern int radeon_max_ioctl; + +static int probe(struct pci_dev *pdev, const struct pci_device_id *ent); +static struct drm_driver driver = { + .driver_features = + DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG | + DRIVER_HAVE_IRQ | DRIVER_HAVE_DMA | DRIVER_IRQ_SHARED | + DRIVER_IRQ_VBL, + .dev_priv_size = sizeof(drm_radeon_buf_priv_t), + .load = radeon_driver_load, + .firstopen = radeon_driver_firstopen, + .open = radeon_driver_open, + .preclose = radeon_driver_preclose, + .postclose = radeon_driver_postclose, + .lastclose = radeon_driver_lastclose, + .unload = radeon_driver_unload, + .vblank_wait = radeon_driver_vblank_wait, + .dri_library_name = dri_library_name, + .irq_preinstall = radeon_driver_irq_preinstall, + .irq_postinstall = radeon_driver_irq_postinstall, + .irq_uninstall = radeon_driver_irq_uninstall, + .irq_handler = radeon_driver_irq_handler, + .reclaim_buffers = drm_core_reclaim_buffers, + .get_map_ofs = drm_core_get_map_ofs, + .get_reg_ofs = drm_core_get_reg_ofs, + .ioctls = radeon_ioctls, + .dma_ioctl = radeon_cp_buffers, + .fops = { + .owner = THIS_MODULE, + .open = drm_open, + .release = drm_release, + .ioctl = drm_ioctl, + .mmap = drm_mmap, + .poll = drm_poll, + .fasync = drm_fasync, +#if defined(CONFIG_COMPAT) && LINUX_VERSION_CODE > KERNEL_VERSION(2,6,9) + .compat_ioctl = radeon_compat_ioctl, +#endif + }, + .pci_driver = { + .name = DRIVER_NAME, + .id_table = pciidlist, + .probe = probe, + .remove = __devexit_p(drm_cleanup_pci), + }, + + .name = DRIVER_NAME, + .desc = DRIVER_DESC, + .date = DRIVER_DATE, + .major = DRIVER_MAJOR, + .minor = DRIVER_MINOR, + .patchlevel = DRIVER_PATCHLEVEL, +}; + +static int probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + return drm_get_dev(pdev, ent, &driver); +} + +static int __init radeon_init(void) +{ + driver.num_ioctls = radeon_max_ioctl; + return drm_init(&driver, pciidlist); +} + +static void __exit radeon_exit(void) +{ + drm_exit(&driver); +} + +module_init(radeon_init); +module_exit(radeon_exit); + +MODULE_AUTHOR(DRIVER_AUTHOR); +MODULE_DESCRIPTION(DRIVER_DESC); +MODULE_LICENSE("GPL and additional rights"); diff --git a/nx-X11/extras/drm/linux-core/radeon_ioc32.c b/nx-X11/extras/drm/linux-core/radeon_ioc32.c new file mode 100644 index 000000000..bfe612215 --- /dev/null +++ b/nx-X11/extras/drm/linux-core/radeon_ioc32.c @@ -0,0 +1,395 @@ +/** + * \file radeon_ioc32.c + * + * 32-bit ioctl compatibility routines for the Radeon DRM. + * + * \author Paul Mackerras <paulus@samba.org> + * + * Copyright (C) Paul Mackerras 2005 + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ +#include <linux/compat.h> +#include <linux/ioctl32.h> + +#include "drmP.h" +#include "drm.h" +#include "radeon_drm.h" +#include "radeon_drv.h" + +typedef struct drm_radeon_init32 { + int func; + u32 sarea_priv_offset; + int is_pci; + int cp_mode; + int gart_size; + int ring_size; + int usec_timeout; + + unsigned int fb_bpp; + unsigned int front_offset, front_pitch; + unsigned int back_offset, back_pitch; + unsigned int depth_bpp; + unsigned int depth_offset, depth_pitch; + + u32 fb_offset; + u32 mmio_offset; + u32 ring_offset; + u32 ring_rptr_offset; + u32 buffers_offset; + u32 gart_textures_offset; +} drm_radeon_init32_t; + +static int compat_radeon_cp_init(struct file *file, unsigned int cmd, + unsigned long arg) +{ + drm_radeon_init32_t init32; + drm_radeon_init_t __user *init; + + if (copy_from_user(&init32, (void __user *)arg, sizeof(init32))) + return -EFAULT; + + init = compat_alloc_user_space(sizeof(*init)); + if (!access_ok(VERIFY_WRITE, init, sizeof(*init)) + || __put_user(init32.func, &init->func) + || __put_user(init32.sarea_priv_offset, &init->sarea_priv_offset) + || __put_user(init32.is_pci, &init->is_pci) + || __put_user(init32.cp_mode, &init->cp_mode) + || __put_user(init32.gart_size, &init->gart_size) + || __put_user(init32.ring_size, &init->ring_size) + || __put_user(init32.usec_timeout, &init->usec_timeout) + || __put_user(init32.fb_bpp, &init->fb_bpp) + || __put_user(init32.front_offset, &init->front_offset) + || __put_user(init32.front_pitch, &init->front_pitch) + || __put_user(init32.back_offset, &init->back_offset) + || __put_user(init32.back_pitch, &init->back_pitch) + || __put_user(init32.depth_bpp, &init->depth_bpp) + || __put_user(init32.depth_offset, &init->depth_offset) + || __put_user(init32.depth_pitch, &init->depth_pitch) + || __put_user(init32.fb_offset, &init->fb_offset) + || __put_user(init32.mmio_offset, &init->mmio_offset) + || __put_user(init32.ring_offset, &init->ring_offset) + || __put_user(init32.ring_rptr_offset, &init->ring_rptr_offset) + || __put_user(init32.buffers_offset, &init->buffers_offset) + || __put_user(init32.gart_textures_offset, + &init->gart_textures_offset)) + return -EFAULT; + + return drm_ioctl(file->f_dentry->d_inode, file, + DRM_IOCTL_RADEON_CP_INIT, (unsigned long) init); +} + +typedef struct drm_radeon_clear32 { + unsigned int flags; + unsigned int clear_color; + unsigned int clear_depth; + unsigned int color_mask; + unsigned int depth_mask; /* misnamed field: should be stencil */ + u32 depth_boxes; +} drm_radeon_clear32_t; + +static int compat_radeon_cp_clear(struct file *file, unsigned int cmd, + unsigned long arg) +{ + drm_radeon_clear32_t clr32; + drm_radeon_clear_t __user *clr; + + if (copy_from_user(&clr32, (void __user *)arg, sizeof(clr32))) + return -EFAULT; + + clr = compat_alloc_user_space(sizeof(*clr)); + if (!access_ok(VERIFY_WRITE, clr, sizeof(*clr)) + || __put_user(clr32.flags, &clr->flags) + || __put_user(clr32.clear_color, &clr->clear_color) + || __put_user(clr32.clear_depth, &clr->clear_depth) + || __put_user(clr32.color_mask, 
&clr->color_mask) + || __put_user(clr32.depth_mask, &clr->depth_mask) + || __put_user((void __user *)(unsigned long)clr32.depth_boxes, + &clr->depth_boxes)) + return -EFAULT; + + return drm_ioctl(file->f_dentry->d_inode, file, + DRM_IOCTL_RADEON_CLEAR, (unsigned long) clr); +} + +typedef struct drm_radeon_stipple32 { + u32 mask; +} drm_radeon_stipple32_t; + +static int compat_radeon_cp_stipple(struct file *file, unsigned int cmd, + unsigned long arg) +{ + drm_radeon_stipple32_t __user *argp = (void __user *) arg; + drm_radeon_stipple_t __user *request; + u32 mask; + + if (get_user(mask, &argp->mask)) + return -EFAULT; + + request = compat_alloc_user_space(sizeof(*request)); + if (!access_ok(VERIFY_WRITE, request, sizeof(*request)) + || __put_user((unsigned int __user *)(unsigned long) mask, + &request->mask)) + return -EFAULT; + + return drm_ioctl(file->f_dentry->d_inode, file, + DRM_IOCTL_RADEON_STIPPLE, (unsigned long) request); +} + +typedef struct drm_radeon_tex_image32 { + unsigned int x, y; /* Blit coordinates */ + unsigned int width, height; + u32 data; +} drm_radeon_tex_image32_t; + +typedef struct drm_radeon_texture32 { + unsigned int offset; + int pitch; + int format; + int width; /* Texture image coordinates */ + int height; + u32 image; +} drm_radeon_texture32_t; + +static int compat_radeon_cp_texture(struct file *file, unsigned int cmd, + unsigned long arg) +{ + drm_radeon_texture32_t req32; + drm_radeon_texture_t __user *request; + drm_radeon_tex_image32_t img32; + drm_radeon_tex_image_t __user *image; + + if (copy_from_user(&req32, (void __user *) arg, sizeof(req32))) + return -EFAULT; + if (req32.image == 0) + return -EINVAL; + if (copy_from_user(&img32, (void __user *)(unsigned long)req32.image, + sizeof(img32))) + return -EFAULT; + + request = compat_alloc_user_space(sizeof(*request) + sizeof(*image)); + if (!access_ok(VERIFY_WRITE, request, + sizeof(*request) + sizeof(*image))) + return -EFAULT; + image = (drm_radeon_tex_image_t __user *) (request + 1); + + if (__put_user(req32.offset, &request->offset) + || __put_user(req32.pitch, &request->pitch) + || __put_user(req32.format, &request->format) + || __put_user(req32.width, &request->width) + || __put_user(req32.height, &request->height) + || __put_user(image, &request->image) + || __put_user(img32.x, &image->x) + || __put_user(img32.y, &image->y) + || __put_user(img32.width, &image->width) + || __put_user(img32.height, &image->height) + || __put_user((const void __user *)(unsigned long)img32.data, + &image->data)) + return -EFAULT; + + return drm_ioctl(file->f_dentry->d_inode, file, + DRM_IOCTL_RADEON_TEXTURE, (unsigned long) request); +} + +typedef struct drm_radeon_vertex2_32 { + int idx; /* Index of vertex buffer */ + int discard; /* Client finished with buffer? 
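+ (Editor's note, hedged: the u32 state and prim members below carry
+ userspace array pointers in the 32-bit ABI; compat_radeon_cp_vertex2
+ widens each with (void __user *)(unsigned long) before storing it into
+ the native drm_radeon_vertex2_t.)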
*/ + int nr_states; + u32 state; + int nr_prims; + u32 prim; +} drm_radeon_vertex2_32_t; + +static int compat_radeon_cp_vertex2(struct file *file, unsigned int cmd, + unsigned long arg) +{ + drm_radeon_vertex2_32_t req32; + drm_radeon_vertex2_t __user *request; + + if (copy_from_user(&req32, (void __user *) arg, sizeof(req32))) + return -EFAULT; + + request = compat_alloc_user_space(sizeof(*request)); + if (!access_ok(VERIFY_WRITE, request, sizeof(*request)) + || __put_user(req32.idx, &request->idx) + || __put_user(req32.discard, &request->discard) + || __put_user(req32.nr_states, &request->nr_states) + || __put_user((void __user *)(unsigned long)req32.state, + &request->state) + || __put_user(req32.nr_prims, &request->nr_prims) + || __put_user((void __user *)(unsigned long)req32.prim, + &request->prim)) + return -EFAULT; + + return drm_ioctl(file->f_dentry->d_inode, file, + DRM_IOCTL_RADEON_VERTEX2, (unsigned long) request); +} + +typedef struct drm_radeon_cmd_buffer32 { + int bufsz; + u32 buf; + int nbox; + u32 boxes; +} drm_radeon_cmd_buffer32_t; + +static int compat_radeon_cp_cmdbuf(struct file *file, unsigned int cmd, + unsigned long arg) +{ + drm_radeon_cmd_buffer32_t req32; + drm_radeon_cmd_buffer_t __user *request; + + if (copy_from_user(&req32, (void __user *) arg, sizeof(req32))) + return -EFAULT; + + request = compat_alloc_user_space(sizeof(*request)); + if (!access_ok(VERIFY_WRITE, request, sizeof(*request)) + || __put_user(req32.bufsz, &request->bufsz) + || __put_user((void __user *)(unsigned long)req32.buf, + &request->buf) + || __put_user(req32.nbox, &request->nbox) + || __put_user((void __user *)(unsigned long)req32.boxes, + &request->boxes)) + return -EFAULT; + + return drm_ioctl(file->f_dentry->d_inode, file, + DRM_IOCTL_RADEON_CMDBUF, (unsigned long) request); +} + +typedef struct drm_radeon_getparam32 { + int param; + u32 value; +} drm_radeon_getparam32_t; + +static int compat_radeon_cp_getparam(struct file *file, unsigned int cmd, + unsigned long arg) +{ + drm_radeon_getparam32_t req32; + drm_radeon_getparam_t __user *request; + + if (copy_from_user(&req32, (void __user *) arg, sizeof(req32))) + return -EFAULT; + + request = compat_alloc_user_space(sizeof(*request)); + if (!access_ok(VERIFY_WRITE, request, sizeof(*request)) + || __put_user(req32.param, &request->param) + || __put_user((void __user *)(unsigned long)req32.value, + &request->value)) + return -EFAULT; + + return drm_ioctl(file->f_dentry->d_inode, file, + DRM_IOCTL_RADEON_GETPARAM, (unsigned long) request); +} + +typedef struct drm_radeon_mem_alloc32 { + int region; + int alignment; + int size; + u32 region_offset; /* offset from start of fb or GART */ +} drm_radeon_mem_alloc32_t; + +static int compat_radeon_mem_alloc(struct file *file, unsigned int cmd, + unsigned long arg) +{ + drm_radeon_mem_alloc32_t req32; + drm_radeon_mem_alloc_t __user *request; + + if (copy_from_user(&req32, (void __user *) arg, sizeof(req32))) + return -EFAULT; + + request = compat_alloc_user_space(sizeof(*request)); + if (!access_ok(VERIFY_WRITE, request, sizeof(*request)) + || __put_user(req32.region, &request->region) + || __put_user(req32.alignment, &request->alignment) + || __put_user(req32.size, &request->size) + || __put_user((int __user *)(unsigned long)req32.region_offset, + &request->region_offset)) + return -EFAULT; + + return drm_ioctl(file->f_dentry->d_inode, file, + DRM_IOCTL_RADEON_ALLOC, (unsigned long) request); +} + +typedef struct drm_radeon_irq_emit32 { + u32 irq_seq; +} drm_radeon_irq_emit32_t; + +static int 
compat_radeon_irq_emit(struct file *file, unsigned int cmd, + unsigned long arg) +{ + drm_radeon_irq_emit32_t req32; + drm_radeon_irq_emit_t __user *request; + + if (copy_from_user(&req32, (void __user *) arg, sizeof(req32))) + return -EFAULT; + + request = compat_alloc_user_space(sizeof(*request)); + if (!access_ok(VERIFY_WRITE, request, sizeof(*request)) + || __put_user((int __user *)(unsigned long)req32.irq_seq, + &request->irq_seq)) + return -EFAULT; + + return drm_ioctl(file->f_dentry->d_inode, file, + DRM_IOCTL_RADEON_IRQ_EMIT, (unsigned long) request); +} + +drm_ioctl_compat_t *radeon_compat_ioctls[] = { + [DRM_RADEON_CP_INIT] = compat_radeon_cp_init, + [DRM_RADEON_CLEAR] = compat_radeon_cp_clear, + [DRM_RADEON_STIPPLE] = compat_radeon_cp_stipple, + [DRM_RADEON_TEXTURE] = compat_radeon_cp_texture, + [DRM_RADEON_VERTEX2] = compat_radeon_cp_vertex2, + [DRM_RADEON_CMDBUF] = compat_radeon_cp_cmdbuf, + [DRM_RADEON_GETPARAM] = compat_radeon_cp_getparam, + [DRM_RADEON_ALLOC] = compat_radeon_mem_alloc, + [DRM_RADEON_IRQ_EMIT] = compat_radeon_irq_emit, +}; + +/** + * Called whenever a 32-bit process running under a 64-bit kernel + * performs an ioctl on /dev/dri/card<n>. + * + * \param filp file pointer. + * \param cmd command. + * \param arg user argument. + * \return zero on success or negative number on failure. + */ +long radeon_compat_ioctl(struct file *filp, unsigned int cmd, + unsigned long arg) +{ + unsigned int nr = DRM_IOCTL_NR(cmd); + drm_ioctl_compat_t *fn = NULL; + int ret; + + if (nr < DRM_COMMAND_BASE) + return drm_compat_ioctl(filp, cmd, arg); + + if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(radeon_compat_ioctls)) + fn = radeon_compat_ioctls[nr - DRM_COMMAND_BASE]; + + lock_kernel(); /* XXX for now */ + if (fn != NULL) + ret = (*fn)(filp, cmd, arg); + else + ret = drm_ioctl(filp->f_dentry->d_inode, filp, cmd, arg); + unlock_kernel(); + + return ret; +} diff --git a/nx-X11/extras/drm/linux-core/savage_drv.c b/nx-X11/extras/drm/linux-core/savage_drv.c new file mode 100644 index 000000000..5c7cdd329 --- /dev/null +++ b/nx-X11/extras/drm/linux-core/savage_drv.c @@ -0,0 +1,100 @@ +/* savage_drv.c -- Savage driver for Linux + * + * Copyright 2004 Felix Kuehling + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sub license, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NON-INFRINGEMENT. IN NO EVENT SHALL FELIX KUEHLING BE LIABLE FOR + * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF + * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ */ + +#include <linux/config.h> +#include "drmP.h" +#include "savage_drm.h" +#include "savage_drv.h" + +#include "drm_pciids.h" + +static struct pci_device_id pciidlist[] = { + savage_PCI_IDS +}; + +extern drm_ioctl_desc_t savage_ioctls[]; +extern int savage_max_ioctl; + +static int probe(struct pci_dev *pdev, const struct pci_device_id *ent); +static struct drm_driver driver = { + .driver_features = + DRIVER_USE_AGP | DRIVER_USE_MTRR | + DRIVER_HAVE_DMA | DRIVER_PCI_DMA, + .dev_priv_size = sizeof(drm_savage_buf_priv_t), + .load = savage_driver_load, + .firstopen = savage_driver_firstopen, + .lastclose = savage_driver_lastclose, + .unload = savage_driver_unload, + .reclaim_buffers = savage_reclaim_buffers, + .get_map_ofs = drm_core_get_map_ofs, + .get_reg_ofs = drm_core_get_reg_ofs, + .ioctls = savage_ioctls, + .dma_ioctl = savage_bci_buffers, + .fops = { + .owner = THIS_MODULE, + .open = drm_open, + .release = drm_release, + .ioctl = drm_ioctl, + .mmap = drm_mmap, + .poll = drm_poll, + .fasync = drm_fasync, + }, + .pci_driver = { + .name = DRIVER_NAME, + .id_table = pciidlist, + .probe = probe, + .remove = __devexit_p(drm_cleanup_pci), + }, + + .name = DRIVER_NAME, + .desc = DRIVER_DESC, + .date = DRIVER_DATE, + .major = DRIVER_MAJOR, + .minor = DRIVER_MINOR, + .patchlevel = DRIVER_PATCHLEVEL, +}; + +static int probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + return drm_get_dev(pdev, ent, &driver); +} + +static int __init savage_init(void) +{ + driver.num_ioctls = savage_max_ioctl; + return drm_init(&driver, pciidlist); +} + +static void __exit savage_exit(void) +{ + drm_exit(&driver); +} + +module_init(savage_init); +module_exit(savage_exit); + +MODULE_AUTHOR( DRIVER_AUTHOR ); +MODULE_DESCRIPTION( DRIVER_DESC ); +MODULE_LICENSE("GPL and additional rights"); diff --git a/nx-X11/extras/drm/linux-core/sis_drv.c b/nx-X11/extras/drm/linux-core/sis_drv.c new file mode 100644 index 000000000..e524ae2dd --- /dev/null +++ b/nx-X11/extras/drm/linux-core/sis_drv.c @@ -0,0 +1,96 @@ +/* sis.c -- sis driver -*- linux-c -*- + * + * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. + * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ * + */ + +#include <linux/config.h> +#include "drmP.h" +#include "sis_drm.h" +#include "sis_drv.h" + +#include "drm_pciids.h" + +static struct pci_device_id pciidlist[] = { + sis_PCI_IDS +}; + +extern drm_ioctl_desc_t sis_ioctls[]; +extern int sis_max_ioctl; + +static int probe(struct pci_dev *pdev, const struct pci_device_id *ent); +static struct drm_driver driver = { + .driver_features = DRIVER_USE_AGP | DRIVER_USE_MTRR, + .context_ctor = sis_init_context, + .context_dtor = sis_final_context, + .reclaim_buffers = drm_core_reclaim_buffers, + .get_map_ofs = drm_core_get_map_ofs, + .get_reg_ofs = drm_core_get_reg_ofs, + .ioctls = sis_ioctls, + .fops = { + .owner = THIS_MODULE, + .open = drm_open, + .release = drm_release, + .ioctl = drm_ioctl, + .mmap = drm_mmap, + .poll = drm_poll, + .fasync = drm_fasync, + }, + .pci_driver = { + .name = DRIVER_NAME, + .id_table = pciidlist, + .probe = probe, + .remove = __devexit_p(drm_cleanup_pci), + }, + + .name = DRIVER_NAME, + .desc = DRIVER_DESC, + .date = DRIVER_DATE, + .major = DRIVER_MAJOR, + .minor = DRIVER_MINOR, + .patchlevel = DRIVER_PATCHLEVEL, +}; + +static int probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + return drm_get_dev(pdev, ent, &driver); +} + +static int __init sis_init(void) +{ + driver.num_ioctls = sis_max_ioctl; + return drm_init(&driver, pciidlist); +} + +static void __exit sis_exit(void) +{ + drm_exit(&driver); +} + +module_init(sis_init); +module_exit(sis_exit); + +MODULE_AUTHOR(DRIVER_AUTHOR); +MODULE_DESCRIPTION(DRIVER_DESC); +MODULE_LICENSE("GPL and additional rights"); diff --git a/nx-X11/extras/drm/linux-core/tdfx_drv.c b/nx-X11/extras/drm/linux-core/tdfx_drv.c new file mode 100644 index 000000000..ce1b7c5ab --- /dev/null +++ b/nx-X11/extras/drm/linux-core/tdfx_drv.c @@ -0,0 +1,94 @@ +/* tdfx_drv.c -- tdfx driver -*- linux-c -*- + * Created: Thu Oct 7 10:38:32 1999 by faith@precisioninsight.com + * + * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. + * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * + * Authors: + * Rickard E. 
(Rik) Faith <faith@valinux.com> + * Daryll Strauss <daryll@valinux.com> + * Gareth Hughes <gareth@valinux.com> + */ + +#include <linux/config.h> +#include "drmP.h" +#include "tdfx_drv.h" + +#include "drm_pciids.h" + +static struct pci_device_id pciidlist[] = { + tdfx_PCI_IDS +}; + +static int probe(struct pci_dev *pdev, const struct pci_device_id *ent); +static struct drm_driver driver = { + .driver_features = DRIVER_USE_MTRR, + .reclaim_buffers = drm_core_reclaim_buffers, + .get_map_ofs = drm_core_get_map_ofs, + .get_reg_ofs = drm_core_get_reg_ofs, + .fops = { + .owner = THIS_MODULE, + .open = drm_open, + .release = drm_release, + .ioctl = drm_ioctl, + .mmap = drm_mmap, + .poll = drm_poll, + .fasync = drm_fasync, + }, + .pci_driver = { + .name = DRIVER_NAME, + .id_table = pciidlist, + .probe = probe, + .remove = __devexit_p(drm_cleanup_pci), + }, + + .name = DRIVER_NAME, + .desc = DRIVER_DESC, + .date = DRIVER_DATE, + .major = DRIVER_MAJOR, + .minor = DRIVER_MINOR, + .patchlevel = DRIVER_PATCHLEVEL, +}; + +static int probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + return drm_get_dev(pdev, ent, &driver); +} + + +static int __init tdfx_init(void) +{ + return drm_init(&driver, pciidlist); +} + +static void __exit tdfx_exit(void) +{ + drm_exit(&driver); +} + +module_init(tdfx_init); +module_exit(tdfx_exit); + +MODULE_AUTHOR(DRIVER_AUTHOR); +MODULE_DESCRIPTION(DRIVER_DESC); +MODULE_LICENSE("GPL and additional rights"); diff --git a/nx-X11/extras/drm/linux-core/via_dmablit.c b/nx-X11/extras/drm/linux-core/via_dmablit.c new file mode 100644 index 000000000..6e04cb570 --- /dev/null +++ b/nx-X11/extras/drm/linux-core/via_dmablit.c @@ -0,0 +1,800 @@ +/* via_dmablit.c -- PCI DMA BitBlt support for the VIA Unichrome/Pro + * + * Copyright (C) 2005 Thomas Hellstrom, All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sub license, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: + * Thomas Hellstrom. + * Partially based on code obtained from Digeo Inc. + */ + + +/* + * Unmaps the DMA mappings. + * FIXME: Is this a NoOp on x86? Also + * FIXME: What happens if this one is called and a pending blit has previously done + * the same DMA mappings? 
+ */ + +#include "drmP.h" +#include "via_drm.h" +#include "via_drv.h" +#include "via_dmablit.h" + +#define VIA_PGDN(x) (((unsigned long)(x)) & PAGE_MASK) +#define VIA_PGOFF(x) (((unsigned long)(x)) & ~PAGE_MASK) +#define VIA_PFN(x) ((unsigned long)(x) >> PAGE_SHIFT) + +typedef struct _drm_via_descriptor { + uint32_t mem_addr; + uint32_t dev_addr; + uint32_t size; + uint32_t next; +} drm_via_descriptor_t; + + +/* + * Unmap a DMA mapping. + */ + + + +static void +via_unmap_blit_from_device(struct pci_dev *pdev, drm_via_sg_info_t *vsg) +{ + int num_desc = vsg->num_desc; + unsigned cur_descriptor_page = num_desc / vsg->descriptors_per_page; + unsigned descriptor_this_page = num_desc % vsg->descriptors_per_page; + drm_via_descriptor_t *desc_ptr = vsg->desc_pages[cur_descriptor_page] + + descriptor_this_page; + dma_addr_t next = vsg->chain_start; + + while(num_desc--) { + if (descriptor_this_page-- == 0) { + cur_descriptor_page--; + descriptor_this_page = vsg->descriptors_per_page - 1; + desc_ptr = vsg->desc_pages[cur_descriptor_page] + + descriptor_this_page; + } + dma_unmap_single(&pdev->dev, next, sizeof(*desc_ptr), DMA_TO_DEVICE); + dma_unmap_page(&pdev->dev, desc_ptr->mem_addr, desc_ptr->size, vsg->direction); + next = (dma_addr_t) desc_ptr->next; + desc_ptr--; + } +} + +/* + * If mode = 0, count how many descriptors are needed. + * If mode = 1, map the DMA pages for the device, and build and map the descriptors as well. + * The hardware runs the descriptors in reverse order, because we are not allowed to update the + * 'next' field of a descriptor without extra synchronization once it has been mapped. + */ + +static void +via_map_blit_for_device(struct pci_dev *pdev, + const drm_via_dmablit_t *xfer, + drm_via_sg_info_t *vsg, + int mode) +{ + unsigned cur_descriptor_page = 0; + unsigned num_descriptors_this_page = 0; + unsigned char *mem_addr = xfer->mem_addr; + unsigned char *cur_mem; + unsigned char *first_addr = (unsigned char *)VIA_PGDN(mem_addr); + uint32_t fb_addr = xfer->fb_addr; + uint32_t cur_fb; + unsigned long line_len; + unsigned remaining_len; + int num_desc = 0; + int cur_line; + dma_addr_t next = 0 | VIA_DMA_DPR_EC; + drm_via_descriptor_t *desc_ptr = NULL; + + if (mode == 1) + desc_ptr = vsg->desc_pages[cur_descriptor_page]; + + for (cur_line = 0; cur_line < xfer->num_lines; ++cur_line) { + + line_len = xfer->line_length; + cur_fb = fb_addr; + cur_mem = mem_addr; + + while (line_len > 0) { + + remaining_len = min(PAGE_SIZE-VIA_PGOFF(cur_mem), line_len); + line_len -= remaining_len; + + if (mode == 1) { + desc_ptr->mem_addr = + dma_map_page(&pdev->dev, + vsg->pages[VIA_PFN(cur_mem) - + VIA_PFN(first_addr)], + VIA_PGOFF(cur_mem), remaining_len, + vsg->direction); + desc_ptr->dev_addr = cur_fb; + + desc_ptr->size = remaining_len; + desc_ptr->next = (uint32_t) next; + next = dma_map_single(&pdev->dev, desc_ptr, sizeof(*desc_ptr), + DMA_TO_DEVICE); + desc_ptr++; + if (++num_descriptors_this_page >= vsg->descriptors_per_page) { + num_descriptors_this_page = 0; + desc_ptr = vsg->desc_pages[++cur_descriptor_page]; + } + } + + num_desc++; + cur_mem += remaining_len; + cur_fb += remaining_len; + } + + mem_addr += xfer->mem_stride; + fb_addr += xfer->fb_stride; + } + + if (mode == 1) { + vsg->chain_start = next; + vsg->state = dr_via_device_mapped; + } + vsg->num_desc = num_desc; +}
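via_map_blit_for_device() above is one walk used twice: with mode 0 it only counts how many descriptors the transfer needs, and with mode 1 it repeats the identical walk to fill and map them once backing storage exists (via_build_sg_info() further down drives it exactly that way). A self-contained sketch of this count-then-fill idiom, with hypothetical names:

#include <stdio.h>
#include <stdlib.h>

/* One walk, two modes: mode 0 only counts, mode 1 also fills. */
static int walk(const int *items, int n, int *out, int mode)
{
	int count = 0, i;

	for (i = 0; i < n; i++) {
		if (mode == 1)
			out[count] = items[i] * 2;	/* stand-in for building a descriptor */
		count++;
	}
	return count;
}

int main(void)
{
	int items[] = { 3, 5, 7 };
	int n = walk(items, 3, NULL, 0);	/* pass 0: count only */
	int *desc = malloc(n * sizeof(*desc));	/* size the storage from the count */

	if (!desc)
		return 1;
	walk(items, 3, desc, 1);		/* pass 1: fill for real */
	printf("built %d descriptors, first = %d\n", n, desc[0]);
	free(desc);
	return 0;
}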
+ +/* + * Frees up all resources for a blit. It is usable even if the + * blit info has only been partially built, as long as the status enum is consistent + * with the actual status of the used resources. + */ + + +void +via_free_sg_info(struct pci_dev *pdev, drm_via_sg_info_t *vsg) +{ + struct page *page; + int i; + + switch(vsg->state) { + case dr_via_device_mapped: + via_unmap_blit_from_device(pdev, vsg); + /* fall through */ + case dr_via_desc_pages_alloc: + for (i=0; i<vsg->num_desc_pages; ++i) { + if (vsg->desc_pages[i] != NULL) + free_page((unsigned long)vsg->desc_pages[i]); + } + kfree(vsg->desc_pages); + /* fall through */ + case dr_via_pages_locked: + for (i=0; i<vsg->num_pages; ++i) { + if ( NULL != (page = vsg->pages[i])) { + if (! PageReserved(page) && (DMA_FROM_DEVICE == vsg->direction)) + SetPageDirty(page); + page_cache_release(page); + } + } + /* fall through */ + case dr_via_pages_alloc: + vfree(vsg->pages); + /* fall through */ + default: + vsg->state = dr_via_sg_init; + } + if (vsg->bounce_buffer) { + vfree(vsg->bounce_buffer); + vsg->bounce_buffer = NULL; + } + vsg->free_on_sequence = 0; +} + +/* + * Fire a blit engine. + */ + +static void +via_fire_dmablit(drm_device_t *dev, drm_via_sg_info_t *vsg, int engine) +{ + drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private; + + VIA_WRITE(VIA_PCI_DMA_MAR0 + engine*0x10, 0); + VIA_WRITE(VIA_PCI_DMA_DAR0 + engine*0x10, 0); + VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_DD | VIA_DMA_CSR_TD | + VIA_DMA_CSR_DE); + VIA_WRITE(VIA_PCI_DMA_MR0 + engine*0x04, VIA_DMA_MR_CM | VIA_DMA_MR_TDIE); + VIA_WRITE(VIA_PCI_DMA_BCR0 + engine*0x10, 0); + VIA_WRITE(VIA_PCI_DMA_DPR0 + engine*0x10, vsg->chain_start); + VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_DE | VIA_DMA_CSR_TS); +} + +/* + * Obtain a page pointer array and lock all pages into system memory. This step + * fails if the calling user does not have access to the submitted address range. + */ + +static int +via_lock_all_dma_pages(drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer) +{ + int ret; + unsigned long first_pfn = VIA_PFN(xfer->mem_addr); + vsg->num_pages = VIA_PFN(xfer->mem_addr + (xfer->num_lines * xfer->mem_stride -1)) - + first_pfn + 1; + + if (NULL == (vsg->pages = vmalloc(sizeof(struct page *) * vsg->num_pages))) + return DRM_ERR(ENOMEM); + memset(vsg->pages, 0, sizeof(struct page *) * vsg->num_pages); + down_read(&current->mm->mmap_sem); + ret = get_user_pages(current, current->mm, (unsigned long) xfer->mem_addr, + vsg->num_pages, vsg->direction, 0, vsg->pages, NULL); + + up_read(&current->mm->mmap_sem); + if (ret != vsg->num_pages) { + if (ret < 0) + return ret; + vsg->state = dr_via_pages_locked; + return DRM_ERR(EINVAL); + } + vsg->state = dr_via_pages_locked; + DRM_DEBUG("DMA pages locked\n"); + return 0; +}
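The missing break statements in via_free_sg_info() above are deliberate: entering the switch at the current teardown state and falling through executes every undo step below it, so a single function can unwind from any stage of a partially built blit. The idiom in isolation, with hypothetical state names:

#include <stdio.h>

enum state { MAPPED, DESC_ALLOC, PAGES_LOCKED, INIT };

/* Teardown mirrors setup in reverse: falling through runs all later steps. */
static void teardown(enum state s)
{
	switch (s) {
	case MAPPED:
		puts("unmap from device");
		/* fall through */
	case DESC_ALLOC:
		puts("free descriptor pages");
		/* fall through */
	case PAGES_LOCKED:
		puts("release locked pages");
		/* fall through */
	default:
		puts("back to init state");
	}
}

int main(void)
{
	teardown(DESC_ALLOC);	/* runs only the last three steps */
	return 0;
}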
+ +/* + * Allocate DMA-capable memory for the blit descriptor chain, and an array that keeps track of the + * pages we allocate. We don't want to use kmalloc for the descriptor chain because it may be + * quite large for some blits, and the pages don't need to be contiguous. + */ + +static int +via_alloc_desc_pages(drm_via_sg_info_t *vsg) +{ + int i; + + vsg->descriptors_per_page = PAGE_SIZE / sizeof( drm_via_descriptor_t); + vsg->num_desc_pages = (vsg->num_desc + vsg->descriptors_per_page - 1) / + vsg->descriptors_per_page; + + if (NULL == (vsg->desc_pages = kmalloc(sizeof(void *) * vsg->num_desc_pages, GFP_KERNEL))) + return DRM_ERR(ENOMEM); + + memset(vsg->desc_pages, 0, sizeof(void *) * vsg->num_desc_pages); + vsg->state = dr_via_desc_pages_alloc; + for (i=0; i<vsg->num_desc_pages; ++i) { + if (NULL == (vsg->desc_pages[i] = + (drm_via_descriptor_t *) __get_free_page(GFP_KERNEL))) + return DRM_ERR(ENOMEM); + } + DRM_DEBUG("Allocated %d pages for %d descriptors.\n", vsg->num_desc_pages, + vsg->num_desc); + return 0; +} + +static void +via_abort_dmablit(drm_device_t *dev, int engine) +{ + drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private; + + VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_TA); +} + +static void +via_dmablit_engine_off(drm_device_t *dev, int engine) +{ + drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private; + + VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_TD | VIA_DMA_CSR_DD); +} + + + +/* + * The dmablit part of the IRQ handler. Trying to do only reasonably fast things here. + * The rest, like unmapping and freeing memory for done blits, is done in a separate workqueue + * task. Basically the task of the interrupt handler is to submit a new blit to the engine, while + * the workqueue task takes care of processing associated with the old blit. + */ + +void +via_dmablit_handler(drm_device_t *dev, int engine, int from_irq) +{ + drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private; + drm_via_blitq_t *blitq = dev_priv->blit_queues + engine; + int cur; + int done_transfer; + unsigned long irqsave=0; + uint32_t status = 0; + + DRM_DEBUG("DMA blit handler called. engine = %d, from_irq = %d, blitq = 0x%lx\n", + engine, from_irq, (unsigned long) blitq); + + if (from_irq) { + spin_lock(&blitq->blit_lock); + } else { + spin_lock_irqsave(&blitq->blit_lock, irqsave); + } + + done_transfer = blitq->is_active && + (( status = VIA_READ(VIA_PCI_DMA_CSR0 + engine*0x04)) & VIA_DMA_CSR_TD); + done_transfer = done_transfer || ( blitq->aborting && !(status & VIA_DMA_CSR_DE)); + + cur = blitq->cur; + if (done_transfer) { + + blitq->blits[cur]->aborted = blitq->aborting; + blitq->done_blit_handle++; + DRM_WAKEUP(blitq->blit_queue + cur); + + cur++; + if (cur >= VIA_NUM_BLIT_SLOTS) + cur = 0; + blitq->cur = cur; + + /* + * Clear transfer done flag. + */ + + VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_TD); + + blitq->is_active = 0; + blitq->aborting = 0; + schedule_work(&blitq->wq); + + } else if (blitq->is_active && time_after_eq(jiffies, blitq->end)) { + + /* + * Abort the transfer after one second.
+ */ + + via_abort_dmablit(dev, engine); + blitq->aborting = 1; + blitq->end = jiffies + DRM_HZ; + } + + if (!blitq->is_active) { + if (blitq->num_outstanding) { + via_fire_dmablit(dev, blitq->blits[cur], engine); + blitq->is_active = 1; + blitq->cur = cur; + blitq->num_outstanding--; + blitq->end = jiffies + DRM_HZ; + if (!timer_pending(&blitq->poll_timer)) { + blitq->poll_timer.expires = jiffies+1; + add_timer(&blitq->poll_timer); + } + } else { + if (timer_pending(&blitq->poll_timer)) { + del_timer(&blitq->poll_timer); + } + via_dmablit_engine_off(dev, engine); + } + } + + if (from_irq) { + spin_unlock(&blitq->blit_lock); + } else { + spin_unlock_irqrestore(&blitq->blit_lock, irqsave); + } +} + + + +/* + * Check whether this blit is still active, performing necessary locking. + */ + +static int +via_dmablit_active(drm_via_blitq_t *blitq, int engine, uint32_t handle, wait_queue_head_t **queue) +{ + unsigned long irqsave; + uint32_t slot; + int active; + + spin_lock_irqsave(&blitq->blit_lock, irqsave); + + /* + * Allow for handle wraparounds. + */ + + active = ((blitq->done_blit_handle - handle) > (1 << 23)) && + ((blitq->cur_blit_handle - handle) <= (1 << 23)); + + if (queue && active) { + slot = handle - blitq->done_blit_handle + blitq->cur -1; + if (slot >= VIA_NUM_BLIT_SLOTS) { + slot -= VIA_NUM_BLIT_SLOTS; + } + *queue = blitq->blit_queue + slot; + } + + spin_unlock_irqrestore(&blitq->blit_lock, irqsave); + + return active; +} + +/* + * Sync. Wait for up to three seconds for the blit to be performed. + */ + +static int +via_dmablit_sync(drm_device_t *dev, uint32_t handle, int engine) +{ + + drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private; + drm_via_blitq_t *blitq = dev_priv->blit_queues + engine; + wait_queue_head_t *queue; + int ret = 0; + + if (via_dmablit_active(blitq, engine, handle, &queue)) { + DRM_WAIT_ON(ret, *queue, 3 * DRM_HZ, + !via_dmablit_active(blitq, engine, handle, NULL)); + } + DRM_DEBUG("DMA blit sync handle 0x%x engine %d returned %d\n", + handle, engine, ret); + + return ret; +} + + +/* + * A timer that regularly polls the blit engine in cases where we don't have interrupts: + * a) Broken hardware (typically those that don't have any video capture facility). + * b) Blit abort. The hardware doesn't send an interrupt when a blit is aborted. + * The timer and hardware IRQs can and do work in parallel. If the hardware has + * IRQs, they will shorten the latency somewhat. + */ + + + +static void +via_dmablit_timer(unsigned long data) +{ + drm_via_blitq_t *blitq = (drm_via_blitq_t *) data; + drm_device_t *dev = blitq->dev; + int engine = (int) + (blitq - ((drm_via_private_t *)dev->dev_private)->blit_queues); + + DRM_DEBUG("Polling timer called for engine %d, jiffies %lu\n", engine, + (unsigned long) jiffies); + + via_dmablit_handler(dev, engine, 0); + + if (!timer_pending(&blitq->poll_timer)) { + blitq->poll_timer.expires = jiffies+1; + add_timer(&blitq->poll_timer); + } + + /* + * Rerun the handler immediately: it deletes the timer again if the + * engine has gone idle, and shortens abort latency otherwise. + */ + + via_dmablit_handler(dev, engine, 0); + +}
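The (1 << 23) tests in via_dmablit_active() above are serial-number arithmetic: the handles increase monotonically and wrap at 2^32, so with unsigned subtraction (done_blit_handle - handle) > 1<<23 means the handle has not completed yet, while (cur_blit_handle - handle) <= 1<<23 means it has actually been issued. The comparison stays correct across wraparound as long as outstanding handles are less than 2^23 apart. A self-contained check of the window near the wrap point:

#include <stdint.h>
#include <stdio.h>

/* Wraparound-safe "issued but not yet done?" test, mirroring the
 * window used by via_dmablit_active() above. */
static int blit_active(uint32_t done, uint32_t cur, uint32_t handle)
{
	return (done - handle) > (1u << 23) && (cur - handle) <= (1u << 23);
}

int main(void)
{
	/* The handle was issued just before the counters wrapped past zero. */
	uint32_t done = 0xfffffffeu, cur = 2u, handle = 0xffffffffu;

	printf("active: %d\n", blit_active(done, cur, handle));	/* prints 1 */
	return 0;
}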
+ +/* + * Workqueue task that frees data and mappings associated with a blit. + * It also wakes up waiting processes. Each of these tasks handles one + * blit engine only and may not be called on each interrupt. + */ + + +static void +via_dmablit_workqueue(void *data) +{ + drm_via_blitq_t *blitq = (drm_via_blitq_t *) data; + drm_device_t *dev = blitq->dev; + unsigned long irqsave; + drm_via_sg_info_t *cur_sg; + int cur_released; + + + DRM_DEBUG("Workqueue task called for blit engine %ld\n",(unsigned long) + (blitq - ((drm_via_private_t *)dev->dev_private)->blit_queues)); + + spin_lock_irqsave(&blitq->blit_lock, irqsave); + + while(blitq->serviced != blitq->cur) { + + cur_released = blitq->serviced++; + + DRM_DEBUG("Releasing blit slot %d\n", cur_released); + + if (blitq->serviced >= VIA_NUM_BLIT_SLOTS) + blitq->serviced = 0; + + cur_sg = blitq->blits[cur_released]; + blitq->num_free++; + + spin_unlock_irqrestore(&blitq->blit_lock, irqsave); + + DRM_WAKEUP(&blitq->busy_queue); + + via_free_sg_info(dev->pdev, cur_sg); + kfree(cur_sg); + + spin_lock_irqsave(&blitq->blit_lock, irqsave); + } + + spin_unlock_irqrestore(&blitq->blit_lock, irqsave); +} + + +/* + * Init all blit engines. Currently we use two, but some hardware has four. + */ + + +void +via_init_dmablit(drm_device_t *dev) +{ + int i,j; + drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private; + drm_via_blitq_t *blitq; + + pci_set_master(dev->pdev); + + for (i=0; i< VIA_NUM_BLIT_ENGINES; ++i) { + blitq = dev_priv->blit_queues + i; + blitq->dev = dev; + blitq->cur_blit_handle = 0; + blitq->done_blit_handle = 0; + blitq->head = 0; + blitq->cur = 0; + blitq->serviced = 0; + blitq->num_free = VIA_NUM_BLIT_SLOTS; + blitq->num_outstanding = 0; + blitq->is_active = 0; + blitq->aborting = 0; + blitq->blit_lock = SPIN_LOCK_UNLOCKED; + for (j=0; j<VIA_NUM_BLIT_SLOTS; ++j) { + DRM_INIT_WAITQUEUE(blitq->blit_queue + j); + } + DRM_INIT_WAITQUEUE(&blitq->busy_queue); + INIT_WORK(&blitq->wq, via_dmablit_workqueue, blitq); + init_timer(&blitq->poll_timer); + blitq->poll_timer.function = &via_dmablit_timer; + blitq->poll_timer.data = (unsigned long) blitq; + } +} + +/* + * Build all info and do all mappings required for a blit. + */ + + +static int +via_build_sg_info(drm_device_t *dev, drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer) +{ + int draw = xfer->to_fb; + int ret = 0; + + vsg->direction = (draw) ? DMA_TO_DEVICE : DMA_FROM_DEVICE; + vsg->bounce_buffer = NULL; + + vsg->state = dr_via_sg_init; + + if (xfer->num_lines <= 0 || xfer->line_length <= 0) { + DRM_ERROR("Zero size bitblt.\n"); + return DRM_ERR(EINVAL); + } + + /* + * The check below is a driver limitation, not a hardware one. We + * don't want to lock unused pages, and don't want to incorporate the + * extra logic for avoiding them. Make sure there are none. + * (Not a big limitation anyway.) + */ + + if (((xfer->mem_stride - xfer->line_length) >= PAGE_SIZE) || + (xfer->mem_stride > 2048)) { + DRM_ERROR("Too large system memory stride.\n"); + return DRM_ERR(EINVAL); + } + + if (xfer->num_lines > 2048) { + DRM_ERROR("Too many PCI DMA bitblt lines.\n"); + return DRM_ERR(EINVAL); + } + + /* + * We allow a negative fb stride to allow flipping of images in + * transfer. + */ + + if (xfer->mem_stride < xfer->line_length || + abs(xfer->fb_stride) < xfer->line_length) { + DRM_ERROR("Invalid frame-buffer / memory stride.\n"); + return DRM_ERR(EINVAL); + } + + /* + * A hardware bug seems to be worked around if system memory addresses start on + * 16-byte boundaries. This seems a bit restrictive, however. VIA has been contacted + * about this.
Meanwhile, the following restrictions are imposed: + */ + +#ifdef VIA_BUGFREE + if ((((unsigned long)xfer->mem_addr & 3) != ((unsigned long)xfer->fb_addr & 3)) || + ((xfer->mem_stride & 3) != (xfer->fb_stride & 3))) { + DRM_ERROR("Invalid DRM bitblt alignment.\n"); + return DRM_ERR(EINVAL); + } +#else + if ((((unsigned long)xfer->mem_addr & 15) || ((unsigned long)xfer->fb_addr & 15))) { + DRM_ERROR("Invalid DRM bitblt alignment.\n"); + return DRM_ERR(EINVAL); + } +#endif + + if (0 != (ret = via_lock_all_dma_pages(vsg, xfer))) { + DRM_ERROR("Could not lock DMA pages.\n"); + via_free_sg_info(dev->pdev, vsg); + return ret; + } + + via_map_blit_for_device(dev->pdev, xfer, vsg, 0); + if (0 != (ret = via_alloc_desc_pages(vsg))) { + DRM_ERROR("Could not allocate DMA descriptor pages.\n"); + via_free_sg_info(dev->pdev, vsg); + return ret; + } + via_map_blit_for_device(dev->pdev, xfer, vsg, 1); + + return 0; +} + + +/* + * Reserve one free slot in the blit queue. Will wait for up to one second for one + * to become available. Otherwise -EBUSY is returned. + */ + +static int +via_dmablit_grab_slot(drm_via_blitq_t *blitq, int engine) +{ + int ret=0; + unsigned long irqsave; + + DRM_DEBUG("Num free is %d\n", blitq->num_free); + spin_lock_irqsave(&blitq->blit_lock, irqsave); + while(blitq->num_free == 0) { + spin_unlock_irqrestore(&blitq->blit_lock, irqsave); + + DRM_WAIT_ON(ret, blitq->busy_queue, DRM_HZ, blitq->num_free > 0); + if (ret) { + return (DRM_ERR(EINTR) == ret) ? DRM_ERR(EAGAIN) : ret; + } + + spin_lock_irqsave(&blitq->blit_lock, irqsave); + } + + blitq->num_free--; + spin_unlock_irqrestore(&blitq->blit_lock, irqsave); + + return 0; +} + +/* + * Hand back a free slot if we changed our mind. + */ + +static void +via_dmablit_release_slot(drm_via_blitq_t *blitq) +{ + unsigned long irqsave; + + spin_lock_irqsave(&blitq->blit_lock, irqsave); + blitq->num_free++; + spin_unlock_irqrestore(&blitq->blit_lock, irqsave); + DRM_WAKEUP( &blitq->busy_queue ); +} + +/* + * Grab a free slot. Build blit info and queue a blit. + */ + + +static int +via_dmablit(drm_device_t *dev, drm_via_dmablit_t *xfer) +{ + drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private; + drm_via_sg_info_t *vsg; + drm_via_blitq_t *blitq; + int ret; + int engine; + unsigned long irqsave; + + if (dev_priv == NULL) { + DRM_ERROR("Called without initialization.\n"); + return DRM_ERR(EINVAL); + } + + engine = (xfer->to_fb) ? 0 : 1; + blitq = dev_priv->blit_queues + engine; + if (0 != (ret = via_dmablit_grab_slot(blitq, engine))) { + return ret; + } + if (NULL == (vsg = kmalloc(sizeof(*vsg), GFP_KERNEL))) { + via_dmablit_release_slot(blitq); + return DRM_ERR(ENOMEM); + } + if (0 != (ret = via_build_sg_info(dev, vsg, xfer))) { + via_dmablit_release_slot(blitq); + kfree(vsg); + return ret; + } + spin_lock_irqsave(&blitq->blit_lock, irqsave); + + blitq->blits[blitq->head++] = vsg; + if (blitq->head >= VIA_NUM_BLIT_SLOTS) + blitq->head = 0; + blitq->num_outstanding++; + xfer->sync.sync_handle = ++blitq->cur_blit_handle; + + spin_unlock_irqrestore(&blitq->blit_lock, irqsave); + xfer->sync.engine = engine; + + via_dmablit_handler(dev, engine, 0); + + return 0; +}
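Both of the IOCTLs that follow report an interrupting signal as -EAGAIN, so callers are expected to simply retry. A hedged user-space sketch of that loop; the header name, the request code DRM_IOCTL_VIA_BLIT_SYNC, and the drm_via_blitsync_t layout are assumed from this tree's via_drm.h, which is not part of this diff:

#include <errno.h>
#include <sys/ioctl.h>
#include "via_drm.h"	/* assumed to provide drm_via_blitsync_t and the ioctl code */

/* Reissue the sync ioctl until it is no longer interrupted by a signal. */
static int via_blit_wait(int fd, drm_via_blitsync_t *sync)
{
	int ret;

	do {
		ret = ioctl(fd, DRM_IOCTL_VIA_BLIT_SYNC, sync);
	} while (ret == -1 && (errno == EAGAIN || errno == EINTR));
	return ret;
}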
+ +/* + * Sync on a previously submitted blit. Note that the X server uses signals extensively, and + * that there is a very high probability that this IOCTL will be interrupted by a signal. In that + * case it returns with -EAGAIN for the signal to be delivered. + * The caller should then reissue the IOCTL. This is similar to what is done for drmGetLock(). + */ + +int +via_dma_blit_sync( DRM_IOCTL_ARGS ) +{ + drm_via_blitsync_t sync; + int err; + DRM_DEVICE; + + DRM_COPY_FROM_USER_IOCTL(sync, (drm_via_blitsync_t __user *)data, sizeof(sync)); + + if (sync.engine >= VIA_NUM_BLIT_ENGINES) + return DRM_ERR(EINVAL); + + err = via_dmablit_sync(dev, sync.sync_handle, sync.engine); + + if (DRM_ERR(EINTR) == err) + err = DRM_ERR(EAGAIN); + + return err; +} + + +/* + * Queue a blit and hand back a handle to be used for sync. This IOCTL may be interrupted by a signal + * while waiting for a free slot in the blit queue. In that case it returns with -EAGAIN and should + * be reissued. See the above IOCTL code. + */ + +int +via_dma_blit( DRM_IOCTL_ARGS ) +{ + drm_via_dmablit_t xfer; + int err; + DRM_DEVICE; + + DRM_COPY_FROM_USER_IOCTL(xfer, (drm_via_dmablit_t __user *)data, sizeof(xfer)); + + err = via_dmablit(dev, &xfer); + + DRM_COPY_TO_USER_IOCTL((void __user *)data, xfer, sizeof(xfer)); + + return err; +} diff --git a/nx-X11/extras/drm/linux-core/via_dmablit.h b/nx-X11/extras/drm/linux-core/via_dmablit.h new file mode 100644 index 000000000..648639174 --- /dev/null +++ b/nx-X11/extras/drm/linux-core/via_dmablit.h @@ -0,0 +1,138 @@ +/* via_dmablit.h -- PCI DMA BitBlt support for the VIA Unichrome/Pro + * + * Copyright 2005 Thomas Hellstrom. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sub license, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: + * Thomas Hellstrom. + * Register info from Digeo Inc.
+ */ + +#ifndef _VIA_DMABLIT_H +#define _VIA_DMABLIT_H + +#define VIA_NUM_BLIT_ENGINES 2 +#define VIA_NUM_BLIT_SLOTS 8 + +struct _drm_via_descriptor; + +typedef struct _drm_via_sg_info { + struct page **pages; + unsigned long num_pages; + struct _drm_via_descriptor **desc_pages; + int num_desc_pages; + int num_desc; + enum dma_data_direction direction; + unsigned char *bounce_buffer; + dma_addr_t chain_start; + uint32_t free_on_sequence; + unsigned int descriptors_per_page; + int aborted; + enum { + dr_via_device_mapped, + dr_via_desc_pages_alloc, + dr_via_pages_locked, + dr_via_pages_alloc, + dr_via_sg_init + } state; +} drm_via_sg_info_t; + +typedef struct _drm_via_blitq { + drm_device_t *dev; + uint32_t cur_blit_handle; + uint32_t done_blit_handle; + unsigned serviced; + unsigned head; + unsigned cur; + unsigned num_free; + unsigned num_outstanding; + unsigned long end; + int aborting; + int is_active; + drm_via_sg_info_t *blits[VIA_NUM_BLIT_SLOTS]; + spinlock_t blit_lock; + wait_queue_head_t blit_queue[VIA_NUM_BLIT_SLOTS]; + wait_queue_head_t busy_queue; + struct work_struct wq; + struct timer_list poll_timer; +} drm_via_blitq_t; + + +/* + * PCI DMA Registers + * Channels 2 & 3 don't seem to be implemented in hardware. + */ + +#define VIA_PCI_DMA_MAR0 0xE40 /* Memory Address Register of Channel 0 */ +#define VIA_PCI_DMA_DAR0 0xE44 /* Device Address Register of Channel 0 */ +#define VIA_PCI_DMA_BCR0 0xE48 /* Byte Count Register of Channel 0 */ +#define VIA_PCI_DMA_DPR0 0xE4C /* Descriptor Pointer Register of Channel 0 */ + +#define VIA_PCI_DMA_MAR1 0xE50 /* Memory Address Register of Channel 1 */ +#define VIA_PCI_DMA_DAR1 0xE54 /* Device Address Register of Channel 1 */ +#define VIA_PCI_DMA_BCR1 0xE58 /* Byte Count Register of Channel 1 */ +#define VIA_PCI_DMA_DPR1 0xE5C /* Descriptor Pointer Register of Channel 1 */ + +#define VIA_PCI_DMA_MAR2 0xE60 /* Memory Address Register of Channel 2 */ +#define VIA_PCI_DMA_DAR2 0xE64 /* Device Address Register of Channel 2 */ +#define VIA_PCI_DMA_BCR2 0xE68 /* Byte Count Register of Channel 2 */ +#define VIA_PCI_DMA_DPR2 0xE6C /* Descriptor Pointer Register of Channel 2 */ + +#define VIA_PCI_DMA_MAR3 0xE70 /* Memory Address Register of Channel 3 */ +#define VIA_PCI_DMA_DAR3 0xE74 /* Device Address Register of Channel 3 */ +#define VIA_PCI_DMA_BCR3 0xE78 /* Byte Count Register of Channel 3 */ +#define VIA_PCI_DMA_DPR3 0xE7C /* Descriptor Pointer Register of Channel 3 */ + +#define VIA_PCI_DMA_MR0 0xE80 /* Mode Register of Channel 0 */ +#define VIA_PCI_DMA_MR1 0xE84 /* Mode Register of Channel 1 */ +#define VIA_PCI_DMA_MR2 0xE88 /* Mode Register of Channel 2 */ +#define VIA_PCI_DMA_MR3 0xE8C /* Mode Register of Channel 3 */ + +#define VIA_PCI_DMA_CSR0 0xE90 /* Command/Status Register of Channel 0 */ +#define VIA_PCI_DMA_CSR1 0xE94 /* Command/Status Register of Channel 1 */ +#define VIA_PCI_DMA_CSR2 0xE98 /* Command/Status Register of Channel 2 */ +#define VIA_PCI_DMA_CSR3 0xE9C /* Command/Status Register of Channel 3 */ + +#define VIA_PCI_DMA_PTR 0xEA0 /* Priority Type Register */ + +/* Define for DMA engine */ +/* DPR */ +#define VIA_DMA_DPR_EC (1<<1) /* end of chain */ +#define VIA_DMA_DPR_DDIE (1<<2) /* descriptor done interrupt enable */ +#define VIA_DMA_DPR_DT (1<<3) /* direction of transfer (RO) */ + +/* MR */ +#define VIA_DMA_MR_CM (1<<0) /* chaining mode */ +#define VIA_DMA_MR_TDIE (1<<1) /* transfer done interrupt enable */ +#define VIA_DMA_MR_HENDMACMD (1<<7) /* ? 
*/ + +/* CSR */ +#define VIA_DMA_CSR_DE (1<<0) /* DMA enable */ +#define VIA_DMA_CSR_TS (1<<1) /* transfer start */ +#define VIA_DMA_CSR_TA (1<<2) /* transfer abort */ +#define VIA_DMA_CSR_TD (1<<3) /* transfer done */ +#define VIA_DMA_CSR_DD (1<<4) /* descriptor done */ + + + +#endif
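One detail worth noting in the register map above: the per-channel blocks follow two fixed strides, 0x10 for MAR/DAR/BCR/DPR and 0x04 for MR and CSR, which is exactly what the engine*0x10 and engine*0x04 offsets in via_fire_dmablit() rely on. A quick stand-alone check of that arithmetic:

#include <stdio.h>

#define VIA_PCI_DMA_MAR0 0xE40	/* channel 0 memory address register */
#define VIA_PCI_DMA_CSR0 0xE90	/* channel 0 command/status register */

int main(void)
{
	int engine;

	/* Prints 0xE40/0xE90, 0xE50/0xE94, 0xE60/0xE98, 0xE70/0xE9C,
	 * matching the MARn/CSRn values defined in the header above. */
	for (engine = 0; engine < 4; engine++)
		printf("engine %d: MAR 0x%X, CSR 0x%X\n", engine,
		       VIA_PCI_DMA_MAR0 + engine * 0x10,
		       VIA_PCI_DMA_CSR0 + engine * 0x04);
	return 0;
}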